Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 81
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 35
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 58
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 77
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/smi.c | 25
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 20
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 144
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 11
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 197
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 12
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 20
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h | 26
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 21
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 123
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 48
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 28
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 73
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 78
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 86
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 16
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/atheros/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 1898
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 52
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 79
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 25
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 216
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 18
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 25
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_keygen.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 153
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 50
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 565
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 95
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 60
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 22
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 65
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 114
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 41
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 46
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 16
-rw-r--r--  drivers/net/ethernet/intel/iavf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h | 530
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 8
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.c (renamed from drivers/net/ethernet/intel/iavf/i40e_adminq.c) | 267
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h (renamed from drivers/net/ethernet/intel/iavf/i40e_adminq.h) | 80
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h | 528
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_alloc.h | 17
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_client.c | 127
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_client.h | 104
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 499
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 85
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_osdep.h | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h | 58
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_status.h | 136
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_trace.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 63
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 250
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 230
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 1027
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 477
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 362
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 301
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 33
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 49
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 18
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 187
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 82
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 21
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 276
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 110
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c | 323
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 97
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 177
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_sgmii.c | 105
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 287
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 335
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 151
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 130
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 277
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/events.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c | 157
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 135
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 64
-rw-r--r--  drivers/net/ethernet/mscc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 26
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.c | 783
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.h | 232
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 357
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.c | 227
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.h | 22
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_s2.h | 64
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.c | 178
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.h | 22
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.h | 403
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 115
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.h | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm_mbox.c | 591
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/crypto.h | 23
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h | 82
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 429
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 147
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 1275
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 113
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 44
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 35
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 406
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 157
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 65
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 75
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 37
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 25
-rw-r--r--  drivers/net/ethernet/realtek/Makefile | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_firmware.c | 231
-rw-r--r--  drivers/net/ethernet/realtek/r8169_firmware.h | 39
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c (renamed from drivers/net/ethernet/realtek/r8169.c) | 854
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 4
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 118
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 37
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 850
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 7
-rw-r--r--  drivers/net/ethernet/via/via-velocity.h | 2
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h | 5
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 258
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_mdio.c | 20
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 35
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 677
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 111
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/netdevsim/dev.c | 44
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/aquantia_main.c | 8
-rw-r--r--  drivers/net/phy/bcm87xx.c | 20
-rw-r--r--  drivers/net/phy/dp83867.c | 193
-rw-r--r--  drivers/net/phy/lxt.c | 6
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 403
-rw-r--r--  drivers/net/phy/phy-core.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 112
-rw-r--r--  drivers/net/phy/phy_device.c | 107
-rw-r--r--  drivers/net/phy/phylink.c | 288
-rw-r--r--  drivers/net/phy/sfp-bus.c | 14
-rw-r--r--  drivers/net/phy/sfp.c | 35
-rw-r--r--  drivers/net/plip/plip.c | 4
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 20
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/vxlan.c | 94
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 2
317 files changed, 20364 insertions, 5571 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 407f4095a37a..ad53e5f72990 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1077,12 +1077,16 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_ALL_TSO)
+
static void bond_compute_features(struct bonding *bond)
{
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
+ netdev_features_t mpls_features = BOND_MPLS_FEATURES;
struct net_device *bond_dev = bond->dev;
struct list_head *iter;
struct slave *slave;
@@ -1093,6 +1097,7 @@ static void bond_compute_features(struct bonding *bond)
if (!bond_has_slaves(bond))
goto done;
vlan_features &= NETIF_F_ALL_FOR_ALL;
+ mpls_features &= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
@@ -1101,6 +1106,11 @@ static void bond_compute_features(struct bonding *bond)
enc_features = netdev_increment_features(enc_features,
slave->dev->hw_enc_features,
BOND_ENC_FEATURES);
+
+ mpls_features = netdev_increment_features(mpls_features,
+ slave->dev->mpls_features,
+ BOND_MPLS_FEATURES);
+
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
@@ -1114,6 +1124,7 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_GSO_UDP_L4;
+ bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
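For context, netdev_increment_features() is what folds each slave's feature set into the aggregates above; a simplified sketch of its semantics (the in-tree version in net/core/dev.c additionally special-cases NETIF_F_HW_CSUM and NETIF_F_VLAN_CHALLENGED):

static netdev_features_t increment_features_sketch(netdev_features_t all,
						   netdev_features_t one,
						   netdev_features_t mask)
{
	/* ONE_FOR_ALL features: one capable slave enables them for the bond */
	all |= one & NETIF_F_ONE_FOR_ALL & mask;
	/* ALL_FOR_ALL features: every slave must support them to survive */
	all &= one | ~NETIF_F_ALL_FOR_ALL;
	return all;
}

This is why the new mpls_features is first masked with NETIF_F_ALL_FOR_ALL before the per-slave loop runs.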
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 063c7a671b41..4b2f8d6f0744 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -825,6 +825,12 @@ static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
+static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+}
+
static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
@@ -895,6 +901,11 @@ static int mv88e6095_stats_get_sset_count(struct mv88e6xxx_chip *chip)
STATS_TYPE_PORT);
}
+static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0);
+}
+
static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
@@ -962,6 +973,13 @@ static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
+static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+}
+
static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
@@ -3444,6 +3462,44 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.phylink_validate = mv88e6352_phylink_validate,
};
+static const struct mv88e6xxx_ops mv88e6250_ops = {
+ /* MV88E6XXX_FAMILY_6250 */
+ .ieee_pri_map = mv88e6250_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6250_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6250_port_link_state,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6250_stats_get_sset_count,
+ .stats_get_strings = mv88e6250_stats_get_strings,
+ .stats_get_stats = mv88e6250_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6250_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6250_g1_reset,
+ .vtu_getnext = mv88e6250_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6065_phylink_validate,
+};
+
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
@@ -4229,6 +4285,27 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6240_ops,
},
+ [MV88E6250] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6250",
+ .num_databases = 64,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6290] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
.family = MV88E6XXX_FAMILY_6390,
@@ -4837,6 +4914,10 @@ static const struct of_device_id mv88e6xxx_of_match[] = {
.compatible = "marvell,mv88e6190",
.data = &mv88e6xxx_table[MV88E6190],
},
+ {
+ .compatible = "marvell,mv88e6250",
+ .data = &mv88e6xxx_table[MV88E6250],
+ },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index d3e10111a6fe..a3121a12bafc 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -58,6 +58,7 @@ enum mv88e6xxx_model {
MV88E6190X,
MV88E6191,
MV88E6240,
+ MV88E6250,
MV88E6290,
MV88E6320,
MV88E6321,
@@ -76,6 +77,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6250, /* 6250 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
@@ -108,6 +110,12 @@ struct mv88e6xxx_info {
* when it is non-zero, and use indirect access to internal registers.
*/
bool multi_chip;
+ /* Dual-chip Addressing Mode
+ * Some chips respond to only half of the 32 SMI addresses,
+ * allowing two to coexist on the same SMI interface.
+ */
+ bool dual_chip;
+
enum dsa_tag_protocol tag_protocol;
/* Mask for FromPort and ToPort value of PortVec used in ATU Move
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 09b8a3d0dd37..1323ef30a5e9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -178,7 +178,7 @@ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
return mv88e6185_g1_wait_ppu_polling(chip);
}
-int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
@@ -194,7 +194,14 @@ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
if (err)
return err;
- err = mv88e6xxx_g1_wait_init_ready(chip);
+ return mv88e6xxx_g1_wait_init_ready(chip);
+}
+
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6250_g1_reset(chip);
if (err)
return err;
@@ -295,6 +302,12 @@ int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
}
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
+{
+ /* Reset the IEEE Tag priorities to defaults */
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa50);
+}
+
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
@@ -375,26 +388,26 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
u16 ptr;
int err;
- /* 01:c2:80:00:00:00:00-01:c2:80:00:00:00:07 are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO;
+ /* 01:80:c2:00:00:00-01:80:c2:00:00:07 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:08-01:c2:80:00:00:00:0f are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI;
+ /* 01:80:c2:00:00:08-01:80:c2:00:00:0f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:20-01:c2:80:00:00:00:27 are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO;
+ /* 01:80:c2:00:00:20-01:80:c2:00:00:27 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:28-01:c2:80:00:00:00:2f are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI;
+ /* 01:80:c2:00:00:28-01:80:c2:00:00:2f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
@@ -461,7 +474,7 @@ int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
/* Offset 0x1d: Statistics Operation 2 */
-int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7bd5ab733a3f..d444266f7d78 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -186,10 +186,10 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL 0x1a
#define MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE 0x8000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK 0x3f00
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO 0x0000
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI 0x0100
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO 0x0200
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI 0x0300
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO 0x0000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI 0x0100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO 0x0200
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI 0x0300
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
@@ -255,11 +255,11 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
-int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
@@ -274,7 +274,9 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
+
int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port);
@@ -301,6 +303,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 4542dfa5fc69..cef5046983e8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -90,7 +90,7 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
if (err)
return err;
} else {
- if (mv88e6xxx_num_databases(chip) > 16) {
+ if (mv88e6xxx_num_databases(chip) > 64) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
&val);
@@ -102,6 +102,9 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
val);
if (err)
return err;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ op |= (fid & 0x30) << 4;
}
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
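To make the bit scattering concrete, a hypothetical helper (illustrative only, not part of the patch) packing a 6-bit FID for a 64-database chip such as the 88E6250 would look like:

static u16 atu_op_pack_fid6(u16 op, u16 fid)
{
	op |= (fid & 0x30) << 4;	/* DBNum[5:4] -> ATU Operation bits 9:8 */
	op |= fid & 0x0f;		/* DBNum[3:0] -> ATU Operation bits 3:0 */
	return op;
}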
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 3e9be3f51196..45040f963142 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -303,6 +303,35 @@ static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_vtu_vid_read(chip, entry);
}
+int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[5:4] are located in VTU Operation 9:8
+ */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & 0x000f;
+ entry->fid |= (val & 0x0300) >> 4;
+ }
+
+ return 0;
+}
+
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -392,6 +421,35 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return 0;
}
+int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[5:4] are located in VTU Operation 9:8
+ */
+ op |= entry->fid & 0x000f;
+ op |= (entry->fid & 0x0030) << 4;
+ }
+
+ return mv88e6xxx_g1_vtu_op(chip, op);
+}
+
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 1546171210a1..b176ee1adbba 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -812,6 +812,32 @@ const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
.irq_free = mv88e6097_watchdog_free,
};
+static void mv88e6250_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, MV88E6250_G2_WDOG_CTL, &reg);
+
+ reg &= ~(MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE);
+
+ mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL, reg);
+}
+
+static int mv88e6250_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL,
+ MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6250_G2_WDOG_CTL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
+ .irq_action = mv88e6097_watchdog_action,
+ .irq_setup = mv88e6250_watchdog_setup,
+ .irq_free = mv88e6250_watchdog_free,
+};
+
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index bfb2c6123f55..a664fc25f132 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -202,6 +202,18 @@
#define MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK 0x00ff
/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6250_G2_WDOG_CTL 0x1b
+#define MV88E6250_G2_WDOG_CTL_QC_HISTORY 0x0100
+#define MV88E6250_G2_WDOG_CTL_QC_EVENT 0x0080
+#define MV88E6250_G2_WDOG_CTL_QC_ENABLE 0x0040
+#define MV88E6250_G2_WDOG_CTL_EGRESS_HISTORY 0x0020
+#define MV88E6250_G2_WDOG_CTL_EGRESS_EVENT 0x0010
+#define MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6250_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6250_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6250_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
#define MV88E6352_G2_WDOG_CTL 0x1b
#define MV88E6352_G2_WDOG_CTL_EGRESS_EVENT 0x0080
#define MV88E6352_G2_WDOG_CTL_RMU_TIMEOUT 0x0040
@@ -330,6 +342,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
@@ -480,6 +493,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
+static const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {};
static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 9a2b4b385a2c..04309ef0a1cc 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -290,6 +290,18 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
}
+/* Support 10, 100 Mbps (e.g. 88E6250 family) */
+int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = 100;
+
+ if (speed > 100)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+}
+
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -517,6 +529,71 @@ int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
+int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ if (port < 5) {
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_FULL;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_FULL;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_FULL;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_FULL;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+ }
+
+ state->link = !!(reg & MV88E6250_PORT_STS_LINK);
+ state->an_enabled = 1;
+ state->an_complete = state->link;
+
+ return 0;
+}
+
int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f2fba3f73199..8d5a6cd6fb19 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -19,6 +19,16 @@
#define MV88E6XXX_PORT_STS_MY_PAUSE 0x4000
#define MV88E6XXX_PORT_STS_HD_FLOW 0x2000
#define MV88E6XXX_PORT_STS_PHY_DETECT 0x1000
+#define MV88E6250_PORT_STS_LINK 0x1000
+#define MV88E6250_PORT_STS_PORTMODE_MASK 0x0f00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
@@ -108,6 +118,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6321 0x3100
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6141 0x3400
@@ -275,6 +286,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
@@ -328,6 +340,8 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
+int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 92e9324f1fb9..5fc78a063843 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -20,6 +20,10 @@
* When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
* multiple devices to share the SMI interface. In this mode it responds to only
* 2 registers, used to indirectly access the internal SMI devices.
+ *
+ * Some chips use a different scheme: Only the ADDR4 pin is used for
+ * configuration, and the device responds to 16 of the 32 SMI
+ * addresses, allowing two to coexist on the same SMI interface.
*/
static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
@@ -72,6 +76,23 @@ static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
.write = mv88e6xxx_smi_direct_write,
};
+static int mv88e6xxx_smi_dual_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr + dev, reg, data);
+}
+
+static int mv88e6xxx_smi_dual_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ return mv88e6xxx_smi_direct_write(chip, chip->sw_addr + dev, reg, data);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_dual_direct_ops = {
+ .read = mv88e6xxx_smi_dual_direct_read,
+ .write = mv88e6xxx_smi_dual_direct_write,
+};
+
/* Offset 0x00: SMI Command Register
* Offset 0x01: SMI Data Register
*/
@@ -140,7 +161,9 @@ static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
- if (sw_addr == 0)
+ if (chip->info->dual_chip)
+ chip->smi_ops = &mv88e6xxx_smi_dual_direct_ops;
+ else if (sw_addr == 0)
chip->smi_ops = &mv88e6xxx_smi_direct_ops;
else if (chip->info->multi_chip)
chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
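In dual-chip mode the internal device address is simply offset by the strap address, so the chip claims a contiguous half of the SMI address space. A worked example, assuming a hypothetical 88E6250 strapped to SMI address 16 and using the base addresses from the info entry above:

	sw_addr = 16
	global2 (dev 0x07) -> SMI address 16 + 0x07 = 23
	port 0  (dev 0x08) -> SMI address 16 + 0x08 = 24
	global1 (dev 0x0f) -> SMI address 16 + 0x0f = 31

/* Illustrative sketch: reading port 0's status register through the
 * dual-direct path, equivalent to what the bus ops above do.
 */
static int example_read_port0_status(struct mv88e6xxx_chip *chip, u16 *val)
{
	return mv88e6xxx_smi_direct_read(chip, chip->sw_addr + 0x08,
					 MV88E6XXX_PORT_STS, val);
}

Addresses 0-15 thus remain free for a second chip strapped to address 0.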
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index b043bfc408f2..61d00682de60 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -55,6 +55,11 @@ struct sja1105_info {
const struct sja1105_regs *regs;
int (*reset_cmd)(const void *ctx, const void *data);
int (*setup_rgmii_delay)(const void *ctx, int port);
+ /* Prototypes from include/net/dsa.h */
+ int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
const char *name;
};
@@ -142,7 +147,20 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry, bool keep);
-u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+enum sja1105_iotag {
+ SJA1105_C_TAG = 0, /* Inner VLAN header */
+ SJA1105_S_TAG = 1, /* Outer VLAN header */
+};
+
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
/* Common implementations for the static and dynamic configs */
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index e73ab28bf632..352bb6e89297 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -35,17 +35,72 @@
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
+struct sja1105_dyn_cmd {
+ bool search;
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+enum sja1105_hostcmd {
+ SJA1105_HOSTCMD_SEARCH = 1,
+ SJA1105_HOSTCMD_READ = 2,
+ SJA1105_HOSTCMD_WRITE = 3,
+ SJA1105_HOSTCMD_INVALIDATE = 4,
+};
+
static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
+ u64 lockeds = 0;
+ u64 hostcmd;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &lockeds, 28, 28, size, op);
sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+
+ /* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
+ * using it to delete a management route was unsupported. UM10944
+ * said about it:
+ *
+ * In case of a write access with the MGMTROUTE flag set,
+ * the flag will be ignored. It will always be found cleared
+ * for read accesses with the MGMTROUTE flag set.
+ *
+ * SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
+ * is now another flag called HOSTCMD which does more stuff (quoting
+ * from UM11040):
+ *
+ * A write request is accepted only when HOSTCMD is set to write host
+ * or invalid. A read request is accepted only when HOSTCMD is set to
+ * search host or read host.
+ *
+ * So it is possible to translate a RDWRSET/VALIDENT combination into
+ * HOSTCMD so that we keep the dynamic command API in place, and
+ * at the same time achieve compatibility with the management route
+ * command structure.
+ */
+ if (cmd->rdwrset == SPI_READ) {
+ if (cmd->search)
+ hostcmd = SJA1105_HOSTCMD_SEARCH;
+ else
+ hostcmd = SJA1105_HOSTCMD_READ;
+ } else {
+ /* SPI_WRITE */
+ if (cmd->valident)
+ hostcmd = SJA1105_HOSTCMD_WRITE;
+ else
+ hostcmd = SJA1105_HOSTCMD_INVALIDATE;
+ }
+ sja1105_packing(p, &hostcmd, 25, 23, size, op);
+
/* Hack - The hardware takes the 'index' field within
* struct sja1105_l2_lookup_entry as the index on which this command
* will operate. However it will ignore everything else, so 'index'
@@ -54,9 +109,8 @@ sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
* such that our API doesn't need to ask for a full-blown entry
* structure when e.g. a delete is requested.
*/
- sja1105_packing(buf, &cmd->index, 29, 20,
+ sja1105_packing(buf, &cmd->index, 15, 6,
SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
- /* TODO hostcmd */
}
static void
@@ -107,6 +161,36 @@ static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static void
+sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+
+ /* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
+ * is the same (driver uses it to confirm that frame was sent).
+ * So just keep the name from E/T.
+ */
+ sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
+ sja1105_packing(buf, &entry->takets, 70, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ return size;
+}
+
/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
* and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
* between entry (0x2d, 0x2e) and command (0x30).
@@ -240,6 +324,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
+#define OP_SEARCH BIT(3)
/* SJA1105E/T: First generation */
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
@@ -304,14 +389,22 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_XMII_PARAMS] = {0},
};
-/* SJA1105P/Q/R/S: Second generation: TODO */
+/* SJA1105P/Q/R/S: Second generation */
struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105pqrs_l2_lookup_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
- .access = (OP_READ | OP_WRITE | OP_DEL),
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
- .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105pqrs_mgmt_route_entry_packing,
+ .cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
},
[BLK_IDX_L2_POLICING] = {0},
@@ -359,6 +452,24 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_XMII_PARAMS] = {0},
};
+/* Provides read access to the settings through the dynamic interface
+ * of the switch.
+ * @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
+ * The selection is limited by the hardware with respect to which
+ * configuration blocks can be read through the dynamic interface.
+ * @index is used to retrieve a particular table entry. If negative,
+ * (and if the @blk_idx supports the searching operation) a search
+ * is performed using the @entry parameter.
+ * @entry Type-casted to an unpacked structure that holds a table entry
+ * of the type specified in @blk_idx.
+ * Usually an output argument. If @index is negative, then this
+ * argument is used as input/output: it should be pre-populated
+ * with the element to search for. Entries which support the
+ * search operation will have an "index" field (not the @index
+ * argument to this function) and that is where the found index
+ * will be returned (or left unmodified - thus negative - if not
+ * found).
+ */
int sja1105_dynamic_config_read(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry)
@@ -375,8 +486,10 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops = &priv->info->dyn_ops[blk_idx];
- if (index >= ops->max_entry_count)
+ if (index >= 0 && index >= ops->max_entry_count)
return -ERANGE;
+ if (index < 0 && !(ops->access & OP_SEARCH))
+ return -EOPNOTSUPP;
if (!(ops->access & OP_READ))
return -EOPNOTSUPP;
if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
@@ -388,9 +501,20 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
cmd.valid = true; /* Trigger action on table entry */
cmd.rdwrset = SPI_READ; /* Action is read */
- cmd.index = index;
+ if (index < 0) {
+ /* Avoid copying a signed negative number to a u64 */
+ cmd.index = 0;
+ cmd.search = true;
+ } else {
+ cmd.index = index;
+ cmd.search = false;
+ }
+ cmd.valident = true;
ops->cmd_packing(packed_buf, &cmd, PACK);
+ if (cmd.search)
+ ops->entry_packing(packed_buf, entry, PACK);
+
/* Send SPI write operation: read config table entry */
rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
packed_buf, ops->packed_size);
@@ -416,7 +540,7 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
* So don't error out in that case.
*/
if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
- return -EINVAL;
+ return -ENOENT;
cpu_relax();
} while (cmd.valid && --retries);
@@ -448,6 +572,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
if (index >= ops->max_entry_count)
return -ERANGE;
+ if (index < 0)
+ return -ERANGE;
if (!(ops->access & OP_WRITE))
return -EOPNOTSUPP;
if (!keep && !(ops->access & OP_DEL))
@@ -510,7 +636,7 @@ static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
* is also received as argument in the Koopman notation that the switch
* hardware stores it in.
*/
-u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params =
priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
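A minimal usage sketch of the new search path (hypothetical helper, mirroring what sja1105pqrs_fdb_add does further down): pre-populate the entry with the key, pass SJA1105_SEARCH as the index, and read the found position back out of the entry:

static int sja1105_find_fdb_entry(struct sja1105_private *priv,
				  u64 macaddr, u16 vid,
				  struct sja1105_l2_lookup_entry *l2_lookup)
{
	memset(l2_lookup, 0, sizeof(*l2_lookup));
	l2_lookup->macaddr = macaddr;
	l2_lookup->vlanid = vid;
	l2_lookup->mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup->mask_vlanid = VLAN_VID_MASK;

	/* On success, l2_lookup->index holds the matching position */
	return sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					   SJA1105_SEARCH, l2_lookup);
}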
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 77be59546a55..740dadf43f01 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -7,13 +7,10 @@
#include "sja1105.h"
#include <linux/packing.h>
-struct sja1105_dyn_cmd {
- u64 valid;
- u64 rdwrset;
- u64 errors;
- u64 valident;
- u64 index;
-};
+/* Special index that can be used for sja1105_dynamic_config_read */
+#define SJA1105_SEARCH -1
+
+struct sja1105_dyn_cmd;
struct sja1105_dynamic_table_ops {
/* This returns size_t just to keep same prototype as the
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 1c3959efebc4..66e90bbe8bc9 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -210,6 +210,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.maxage = SJA1105_AGEING_TIME_MS(300000),
/* All entries within a FDB bin are available for learning */
.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
/* This selects between Independent VLAN Learning (IVL) and
@@ -225,6 +227,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
* Maybe correlate with no_linklocal_learn from bridge driver?
*/
.no_mgmt_learn = true,
+ /* P/Q/R/S only */
+ .use_static = true,
+ /* Dynamically learned FDB entries can overwrite other (older)
+ * dynamic FDB entries
+ */
+ .owr_dyn = true,
+ .drpnolearn = true,
};
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
@@ -734,15 +743,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-static void sja1105_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void sja1105_mac_config(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
{
struct sja1105_private *priv = ds->priv;
- if (!phydev->link)
+ if (!state->link)
sja1105_adjust_port_config(priv, port, 0, false);
else
- sja1105_adjust_port_config(priv, port, phydev->speed, true);
+ sja1105_adjust_port_config(priv, port, state->speed, true);
}
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
@@ -785,10 +795,10 @@ static inline int sja1105et_fdb_index(int bin, int way)
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
-static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
- const u8 *addr, u16 vid,
- struct sja1105_l2_lookup_entry *match,
- int *last_unused)
+static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
{
int way;
@@ -817,8 +827,8 @@ static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
return -1;
}
-static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
@@ -826,10 +836,10 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
int last_unused = -1;
int bin, way;
- bin = sja1105_fdb_hash(priv, addr, vid);
+ bin = sja1105et_fdb_hash(priv, addr, vid);
- way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
- &l2_lookup, &last_unused);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
if (way >= 0) {
/* We have an FDB entry. Is our port in the destination
* mask? If yes, we need to do nothing. If not, we need
@@ -873,17 +883,17 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
true);
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
int index, bin, way;
bool keep;
- bin = sja1105_fdb_hash(priv, addr, vid);
- way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
- &l2_lookup, NULL);
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
if (way < 0)
return 0;
index = sja1105et_fdb_index(bin, way);
@@ -893,8 +903,8 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
* need to completely evict the FDB entry.
* Otherwise we just write it back.
*/
- if (l2_lookup.destports & BIT(port))
- l2_lookup.destports &= ~BIT(port);
+ l2_lookup.destports &= ~BIT(port);
+
if (l2_lookup.destports)
keep = true;
else
@@ -904,6 +914,138 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
index, &l2_lookup, keep);
}
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ /* Search for an existing entry in the FDB table */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc == 0) {
+ /* Found and this port is already in the entry's
+ * port mask => job done
+ */
+ if (l2_lookup.destports & BIT(port))
+ return 0;
+ /* l2_lookup.index is populated by the switch in case it
+ * found something.
+ */
+ l2_lookup.destports |= BIT(port);
+ goto skip_finding_an_index;
+ }
+
+ /* Not found, so try to find an unused spot in the FDB.
+ * This is slightly inefficient because the strategy is to probe
+ * every possible position from 0 to 1023.
+ */
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL);
+ if (rc < 0)
+ break;
+ }
+ if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
+ dev_err(ds->dev, "FDB is full, cannot add entry.\n");
+ return -EINVAL;
+ }
+ l2_lookup.index = i;
+
+skip_finding_an_index:
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+}
+
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ bool keep;
+ int rc;
+
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc < 0)
+ return 0;
+
+ l2_lookup.destports &= ~BIT(port);
+
+ /* Decide whether we remove just this port from the FDB entry,
+ * or if we remove it completely.
+ */
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ /* Since we make use of VLANs even when the bridge core doesn't tell us
+ * to, translate these FDB entries into the correct dsa_8021q ones.
+ */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, port);
+ u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 rx_vid = dsa_8021q_rx_vid(ds, port);
+
+ rc = priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
+ if (rc < 0)
+ return rc;
+ return priv->info->fdb_add_cmd(ds, upstream, addr, rx_vid);
+ }
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ /* Since we make use of VLANs even when the bridge core doesn't tell us
+ * to, translate these FDB entries into the correct dsa_8021q ones.
+ */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, port);
+ u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 rx_vid = dsa_8021q_rx_vid(ds, port);
+
+ rc = priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
+ if (rc < 0)
+ return rc;
+ return priv->info->fdb_del_cmd(ds, upstream, addr, rx_vid);
+ }
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -919,7 +1061,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &l2_lookup);
/* No fdb entry at i, not an issue */
- if (rc == -EINVAL)
+ if (rc == -ENOENT)
continue;
if (rc) {
dev_err(dev, "Failed to dump FDB: %d\n", rc);
@@ -935,6 +1077,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ /* We need to hide the dsa_8021q VLAN from the user.
+ * Convert the TX VID into the pvid that is active in
+ * standalone and non-vlan_filtering modes, aka 1.
+ * The RX VID is applied on the CPU port, which is not seen by
+ * the bridge core anyway, so there's nothing to hide.
+ */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ l2_lookup.vlanid = 1;
cb(macaddr, l2_lookup.vlanid, false, data);
}
return 0;
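
Taken together, these wrappers keep the dsa_8021q VLANs invisible to the bridge layer: in standalone (non-vlan_filtering) mode one FDB operation fans out into two hardware entries - one towards the user port keyed on its TX VID and one towards the upstream (CPU) port keyed on its RX VID - while the dump path folds the VID back to the default pvid of 1 before reporting the entry upwards. The VID encodings themselves come from dsa_8021q_tx_vid()/dsa_8021q_rx_vid() in net/dsa/tag_8021q.c and are opaque to this driver.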
@@ -1446,6 +1597,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
if (!timeout) {
/* Clean up the management route so that a follow-up
* frame may not match on it by mistake.
+ * This is only supported in hardware on P/Q/R/S - on E/T it is
+ * a no-op and we silently discard the -EOPNOTSUPP.
*/
sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, false);
@@ -1515,9 +1668,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
- .adjust_link = sja1105_adjust_link,
.set_ageing_time = sja1105_set_ageing_time,
.phylink_validate = sja1105_phylink_validate,
+ .phylink_mac_config = sja1105_mac_config,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 2eb70b8acfc3..b1344ed1697f 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -541,6 +541,8 @@ struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -550,6 +552,8 @@ struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -559,6 +563,8 @@ struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -568,6 +574,8 @@ struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -577,6 +585,8 @@ struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -587,5 +597,7 @@ struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
.name = "SJA1105S",
};
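
Every per-family info structure now carries the FDB callbacks, so the common sja1105_fdb_add()/sja1105_fdb_del() wrappers dispatch by chip generation. The sja1105.h side of this change is not shown in this excerpt; judging from the call sites, the new struct sja1105_info members presumably look like this (a sketch, other members omitted):

    int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
                       const unsigned char *addr, u16 vid);
    int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
                       const unsigned char *addr, u16 vid);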
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index b3c992b0abb0..6d65a7b09395 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -236,10 +236,20 @@ size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_l2_lookup_entry *entry = entry_ptr;
- /* These are static L2 lookup entries, so the structure
- * should match UM11040 Table 16/17 definitions when
- * LOCKEDS is 1.
- */
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
+ sja1105_packing(buf, &entry->takets, 146, 146, size, op);
+ sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
+ sja1105_packing(buf, &entry->retag, 144, 144, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 159, 159, size, op);
+ sja1105_packing(buf, &entry->age, 158, 144, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
+ sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
sja1105_packing(buf, &entry->destports, 21, 17, size, op);
@@ -442,7 +452,7 @@ const char *sja1105_static_config_error_msg[] = {
"vl-forwarding-parameters-table.partspc.",
};
-sja1105_config_valid_t
+static sja1105_config_valid_t
static_config_check_memory_size(const struct sja1105_table *tables)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 069ca8fd059c..d513b1c91b98 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -122,9 +122,35 @@ struct sja1105_l2_lookup_entry {
u64 destports;
u64 enfport;
u64 index;
+ /* P/Q/R/S only */
+ u64 mask_iotag;
+ u64 mask_vlanid;
+ u64 mask_macaddr;
+ u64 iotag;
+ bool lockeds;
+ union {
+ /* LOCKEDS=1: Static FDB entries */
+ struct {
+ u64 tsreg;
+ u64 mirrvlan;
+ u64 takets;
+ u64 mirr;
+ u64 retag;
+ };
+ /* LOCKEDS=0: Dynamically learned FDB entries */
+ struct {
+ u64 touched;
+ u64 age;
+ };
+ };
};
struct sja1105_l2_lookup_params_entry {
+ u64 start_dynspc; /* P/Q/R/S only */
+ u64 drpnolearn; /* P/Q/R/S only */
+ u64 use_static; /* P/Q/R/S only */
+ u64 owr_dyn; /* P/Q/R/S only */
+ u64 learn_once; /* P/Q/R/S only */
u64 maxage; /* Shared */
u64 dyn_tbsz; /* E/T only */
u64 poly; /* E/T only */
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9f80b73f90b1..414bae989e10 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,6 +32,8 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
+#define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32
+#define ENA_ADMIN_EXTRA_PROPERTIES_COUNT 32
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
@@ -60,6 +62,8 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_NUM = 2,
ENA_ADMIN_HW_HINTS = 3,
ENA_ADMIN_LLQ = 4,
+ ENA_ADMIN_EXTRA_PROPERTIES_STRINGS = 5,
+ ENA_ADMIN_EXTRA_PROPERTIES_FLAGS = 6,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
@@ -524,6 +528,11 @@ struct ena_admin_feature_llq_desc {
/* the stride control the driver selected to use */
u16 descriptors_stride_ctrl_enabled;
+
+ /* Maximum size in bytes taken by llq entries in a single tx burst.
+ * Set to 0 when there is no such limit.
+ */
+ u32 max_tx_burst_size;
};
struct ena_admin_queue_feature_desc {
@@ -555,6 +564,14 @@ struct ena_admin_set_feature_mtu_desc {
u32 mtu;
};
+struct ena_admin_get_extra_properties_strings_desc {
+ u32 count;
+};
+
+struct ena_admin_get_extra_properties_flags_desc {
+ u32 flags;
+};
+
struct ena_admin_set_feature_host_attr_desc {
/* host OS info base address in OS memory. host info is 4KB of
* physically contiguous
@@ -859,6 +876,10 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_get_extra_properties_strings_desc extra_properties_strings;
+
+ struct ena_admin_get_extra_properties_flags_desc extra_properties_flags;
} u;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7f8266b191ae..dbc12e383ad2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -115,7 +115,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
GFP_KERNEL);
if (!sq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -137,7 +137,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
GFP_KERNEL);
if (!cq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -160,7 +160,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
GFP_KERNEL);
if (!aenq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -285,7 +285,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -356,7 +356,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
}
@@ -382,7 +382,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed");
+ pr_err("bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -396,6 +396,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
0x0, io_sq->llq_info.desc_list_entry_size);
io_sq->llq_buf_ctrl.descs_left_in_line =
io_sq->llq_info.descs_num_before_header;
+
+ if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+ io_sq->entries_in_tx_burst_left =
+ io_sq->llq_info.max_entries_in_tx_burst;
}
io_sq->tail = 0;
@@ -436,7 +440,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -727,6 +731,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
supported_feat, llq_info->descs_num_before_header);
}
+ llq_info->max_entries_in_tx_burst =
+ (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+
rc = ena_com_set_llq(ena_dev);
if (rc)
pr_err("Cannot set LLQ configuration: %d\n", rc);
@@ -755,16 +762,26 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
admin_queue->stats.no_completion++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- if (comp_ctx->status == ENA_CMD_COMPLETED)
- pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
- comp_ctx->cmd_opcode);
- else
- pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode,
+ admin_queue->auto_polling ? "ON" : "OFF");
+ /* Check if fallback to polling is enabled */
+ if (admin_queue->auto_polling)
+ admin_queue->polling = true;
+ } else {
+ pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
-
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
+ }
+ /* Check if we fell back to polling mode. This happens when there is
+ * a completion without an interrupt and autopolling mode is enabled;
+ * in that case, continue normal execution.
+ */
+ if (!admin_queue->polling) {
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
+ }
}
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
@@ -822,7 +839,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- pr_err("Read failure: wrong offset provided");
+ pr_err("Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -1643,6 +1660,12 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling)
+{
+ ena_dev->admin_queue.auto_polling = polling;
+}
+
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
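
The new setter is the only knob; nothing visible in this excerpt enables it, so the default behavior is unchanged until a caller opts in. A hypothetical call site (the placement is illustrative, not part of this patch):

    /* Opt in to the polling fallback once the admin queue is up */
    ena_com_set_admin_auto_polling_mode(ena_dev, true);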
@@ -1870,6 +1893,62 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp resp;
+ struct ena_extra_properties_strings *extra_properties_strings =
+ &ena_dev->extra_properties_strings;
+ u32 rc;
+
+ extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
+ ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
+
+ extra_properties_strings->virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev,
+ extra_properties_strings->size,
+ &extra_properties_strings->dma_addr,
+ GFP_KERNEL);
+ if (unlikely(!extra_properties_strings->virt_addr)) {
+ pr_err("Failed to allocate extra properties strings\n");
+ return 0;
+ }
+
+ rc = ena_com_get_feature_ex(ena_dev, &resp,
+ ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
+ extra_properties_strings->dma_addr,
+ extra_properties_strings->size);
+ if (rc) {
+ pr_debug("Failed to get extra properties strings\n");
+ goto err;
+ }
+
+ return resp.u.extra_properties_strings.count;
+err:
+ ena_com_delete_extra_properties_strings(ena_dev);
+ return 0;
+}
+
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
+{
+ struct ena_extra_properties_strings *extra_properties_strings =
+ &ena_dev->extra_properties_strings;
+
+ if (extra_properties_strings->virt_addr) {
+ dma_free_coherent(ena_dev->dmadev,
+ extra_properties_strings->size,
+ extra_properties_strings->virt_addr,
+ extra_properties_strings->dma_addr);
+ extra_properties_strings->virt_addr = NULL;
+ }
+}
+
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp,
+ ENA_ADMIN_EXTRA_PROPERTIES_FLAGS);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
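
From the two constants added to ena_admin_defs.h, the strings buffer is a flat DMA region of 32 strings of 32 bytes each, where string i names private-flag bit i - the ethtool hunks below consume it exactly that way. Schematically (a sketch of the assumed layout, not patch code):

    u8 *name = extra_properties_strings->virt_addr +
               i * ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;    /* flag i's name */
    bool set = !!(resp.u.extra_properties_flags.flags & BIT(i)); /* its value */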
@@ -2913,8 +2992,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_cfg)
{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
int rc;
- int size;
if (!llq_features->max_llq_num) {
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -2925,12 +3004,10 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- /* Validate the descriptor is not too big */
- size = ena_dev->tx_max_header_size;
- size += ena_dev->llq_info.descs_num_before_header *
- sizeof(struct ena_eth_io_tx_desc);
+ ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
+ (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
- if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ if (unlikely(ena_dev->tx_max_header_size == 0)) {
pr_err("the size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
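
The rework derives tx_max_header_size from the negotiated entry layout instead of validating a precomputed size against it. With illustrative numbers: a 128-byte desc_list_entry_size and descs_num_before_header = 2, at 16 bytes per struct ena_eth_io_tx_desc (four u32 words), leave 128 - 2 * 16 = 96 bytes of header room; a result of 0 means there is no header room at all, hence the -EINVAL.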
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 078d6f2b4f39..6d356cb05420 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -159,6 +159,7 @@ struct ena_com_llq_info {
u16 desc_list_entry_size;
u16 descs_num_before_header;
u16 descs_per_entry;
+ u16 max_entries_in_tx_burst;
};
struct ena_com_io_cq {
@@ -238,6 +239,7 @@ struct ena_com_io_sq {
u8 phase;
u8 desc_entry_size;
u8 dma_addr_bits;
+ u16 entries_in_tx_burst_left;
} ____cacheline_aligned;
struct ena_com_admin_cq {
@@ -281,6 +283,9 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
+ /* Define if fallback to polling mode should occur */
+ bool auto_polling;
+
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -345,6 +350,12 @@ struct ena_host_attribute {
dma_addr_t host_info_dma_addr;
};
+struct ena_extra_properties_strings {
+ u8 *virt_addr;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
/* Each ena_dev is a PCI function. */
struct ena_com_dev {
struct ena_com_admin_queue admin_queue;
@@ -373,6 +384,7 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+ struct ena_extra_properties_strings extra_properties_strings;
};
struct ena_com_dev_get_features_ctx {
@@ -536,6 +548,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
*/
bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the autopolling mode.
+ * If autopolling is enabled, the driver will switch to polling mode
+ * whenever an admin completion arrives without an interrupt.
+ */
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling);
+
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
@@ -594,6 +617,31 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev);
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *resp);
+/* ena_com_extra_properties_strings_init - Initialize the extra properties strings buffer.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the extra properties strings buffer.
+ */
+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_extra_properties_strings - Free the extra properties strings buffer.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated extra properties strings buffer.
+ */
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_extra_properties_flags - Retrieve extra properties flags.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Extra properties flags.
+ *
+ * Retrieve the extra properties flags.
+ *
+ * @return - 0 on success, negative value otherwise.
+ */
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
/* ena_com_get_dma_width - Retrieve physical dma address width the device
* supports.
* @ena_dev: ENA communication layer struct
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f6c2d3855be8..cad2b572808e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -82,6 +82,17 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ if (unlikely(!io_sq->entries_in_tx_burst_left)) {
+ pr_err("Error: trying to send more packets than tx burst allows\n");
+ return -ENOSPC;
+ }
+
+ io_sq->entries_in_tx_burst_left--;
+ pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ io_sq->qid, io_sq->entries_in_tx_burst_left);
+ }
+
/* Make sure everything was written into the bounce buffer before
* writing the bounce buffer to the device
*/
@@ -274,23 +285,6 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
return count;
}
-static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- int rc;
-
- if (ena_tx_ctx->meta_valid) {
- rc = memcmp(&io_sq->cached_tx_meta,
- &ena_tx_ctx->ena_meta,
- sizeof(struct ena_com_tx_meta));
-
- if (unlikely(rc != 0))
- return true;
- }
-
- return false;
-}
-
static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
struct ena_com_tx_ctx *ena_tx_ctx)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 340d02b64ca6..77986c0ea52c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -125,8 +125,55 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
return ena_com_free_desc(io_sq) > temp;
}
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ if (!ena_tx_ctx->meta_valid)
+ return false;
+
+ return !!memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+}
+
+static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
+{
+ return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
+ io_sq->llq_info.max_entries_in_tx_burst > 0;
+}
+
+static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_com_llq_info *llq_info;
+ int descs_after_first_entry;
+ int num_entries_needed = 1;
+ u16 num_descs;
+
+ if (!is_llq_max_tx_burst_exists(io_sq))
+ return false;
+
+ llq_info = &io_sq->llq_info;
+ num_descs = ena_tx_ctx->num_bufs;
+
+ if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+ ++num_descs;
+
+ if (num_descs > llq_info->descs_num_before_header) {
+ descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
+ num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
+ llq_info->descs_per_entry);
+ }
+
+ pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ num_descs, num_entries_needed);
+
+ return num_entries_needed > io_sq->entries_in_tx_burst_left;
+}
+
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
+ u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
@@ -134,6 +181,12 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
writel(tail, io_sq->db_addr);
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ pr_debug("reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
+ io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ }
+
return 0;
}
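
A worked example for ena_com_is_doorbell_needed(), with illustrative values: a packet with num_bufs = 6 plus a changed meta descriptor gives num_descs = 7; with descs_num_before_header = 2 and descs_per_entry = 4, the first LLQ entry carries 2 descriptors and the remaining 5 need DIV_ROUND_UP(5, 4) = 2 further entries, 3 in total. The early doorbell is forced only when fewer than 3 burst entries remain, and ena_com_write_sq_doorbell() then resets the budget to max_entries_in_tx_burst.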
@@ -142,15 +195,17 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
u16 unreported_comp, head;
bool need_update;
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (io_cq->cq_head_db_reg && need_update) {
- pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
+ if (unlikely(io_cq->cq_head_db_reg)) {
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (unlikely(need_update)) {
+ pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ writel(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fe596bc30a96..5687a2860f01 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -88,13 +88,14 @@ static const struct ena_stats ena_stats_tx_strings[] = {
static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(cnt),
ENA_STAT_RX_ENTRY(bytes),
+ ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+ ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
ENA_STAT_RX_ENTRY(bad_csum),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num),
- ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
ENA_STAT_RX_ENTRY(csum_unchecked),
@@ -197,15 +198,24 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
ena_dev_admin_queue_stats(adapter, &data);
}
+static int get_stats_sset_count(struct ena_adapter *adapter)
+{
+ return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
int ena_get_sset_count(struct net_device *netdev, int sset)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- if (sset != ETH_SS_STATS)
+ switch (sset) {
+ case ETH_SS_STATS:
+ return get_stats_sset_count(adapter);
+ case ETH_SS_PRIV_FLAGS:
+ return adapter->ena_extra_properties_count;
+ default:
return -EOPNOTSUPP;
-
- return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
- + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+ }
}
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
@@ -247,26 +257,54 @@ static void ena_com_dev_strings(u8 **data)
}
}
-static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+static void get_stats_strings(struct ena_adapter *adapter, u8 *data)
{
- struct ena_adapter *adapter = netdev_priv(netdev);
const struct ena_stats *ena_stats;
int i;
- if (sset != ETH_SS_STATS)
- return;
-
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
-
memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
-
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
}
+static void get_private_flags_strings(struct ena_adapter *adapter, u8 *data)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ u8 *strings = ena_dev->extra_properties_strings.virt_addr;
+ int i;
+
+ if (unlikely(!strings)) {
+ adapter->ena_extra_properties_count = 0;
+ return;
+ }
+
+ for (i = 0; i < adapter->ena_extra_properties_count; i++) {
+ strlcpy(data, strings + ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN * i,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ get_stats_strings(adapter, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ get_private_flags_strings(adapter, data);
+ break;
+ default:
+ break;
+ }
+}
+
static int ena_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
@@ -441,6 +479,7 @@ static void ena_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
+ info->n_priv_flags = adapter->ena_extra_properties_count;
}
static void ena_get_ringparam(struct net_device *netdev,
@@ -798,6 +837,20 @@ static int ena_set_tunable(struct net_device *netdev,
return ret;
}
+static u32 ena_get_priv_flags(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 rc;
+
+ rc = ena_com_get_extra_properties_flags(ena_dev, &get_resp);
+ if (!rc)
+ return get_resp.u.extra_properties_flags.flags;
+
+ return 0;
+}
+
static const struct ethtool_ops ena_ethtool_ops = {
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
@@ -819,6 +872,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_channels = ena_get_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
+ .get_priv_flags = ena_get_priv_flags,
};
void ena_set_ethtool_ops(struct net_device *netdev)
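
With the three ETH_SS_PRIV_FLAGS pieces wired up - the count from ena_get_sset_count(), the names from ena_get_strings() and the values from ena_get_priv_flags() - the stock ethtool binary can display the device's extra properties (eth0 being a placeholder interface name):

    ethtool --show-priv-flags eth0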
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 9c83642922c7..68bed24178d9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -228,11 +228,11 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
size = sizeof(u16) * tx_ring->ring_size;
- tx_ring->free_tx_ids = vzalloc_node(size, node);
- if (!tx_ring->free_tx_ids) {
- tx_ring->free_tx_ids = vzalloc(size);
- if (!tx_ring->free_tx_ids)
- goto err_free_tx_ids;
+ tx_ring->free_ids = vzalloc_node(size, node);
+ if (!tx_ring->free_ids) {
+ tx_ring->free_ids = vzalloc(size);
+ if (!tx_ring->free_ids)
+ goto err_tx_free_ids;
}
size = tx_ring->tx_max_header_size;
@@ -245,7 +245,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
- tx_ring->free_tx_ids[i] = i;
+ tx_ring->free_ids[i] = i;
/* Reset tx statistics */
memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
@@ -256,9 +256,9 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
return 0;
err_push_buf_intermediate_buf:
- vfree(tx_ring->free_tx_ids);
- tx_ring->free_tx_ids = NULL;
-err_free_tx_ids:
+ vfree(tx_ring->free_ids);
+ tx_ring->free_ids = NULL;
+err_tx_free_ids:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
@@ -278,8 +278,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- vfree(tx_ring->free_tx_ids);
- tx_ring->free_tx_ids = NULL;
+ vfree(tx_ring->free_ids);
+ tx_ring->free_ids = NULL;
vfree(tx_ring->push_buf_intermediate_buf);
tx_ring->push_buf_intermediate_buf = NULL;
@@ -377,10 +377,10 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
}
size = sizeof(u16) * rx_ring->ring_size;
- rx_ring->free_rx_ids = vzalloc_node(size, node);
- if (!rx_ring->free_rx_ids) {
- rx_ring->free_rx_ids = vzalloc(size);
- if (!rx_ring->free_rx_ids) {
+ rx_ring->free_ids = vzalloc_node(size, node);
+ if (!rx_ring->free_ids) {
+ rx_ring->free_ids = vzalloc(size);
+ if (!rx_ring->free_ids) {
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
return -ENOMEM;
@@ -389,7 +389,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
/* Req id ring for receiving RX pkts out of order */
for (i = 0; i < rx_ring->ring_size; i++)
- rx_ring->free_rx_ids[i] = i;
+ rx_ring->free_ids[i] = i;
/* Reset rx statistics */
memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
@@ -415,8 +415,8 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- vfree(rx_ring->free_rx_ids);
- rx_ring->free_rx_ids = NULL;
+ vfree(rx_ring->free_ids);
+ rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -531,7 +531,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
for (i = 0; i < num; i++) {
struct ena_rx_buffer *rx_info;
- req_id = rx_ring->free_rx_ids[next_to_use];
+ req_id = rx_ring->free_ids[next_to_use];
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
break;
@@ -797,7 +797,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_pkts++;
total_done += tx_info->tx_descs;
- tx_ring->free_tx_ids[next_to_clean] = req_id;
+ tx_ring->free_ids[next_to_clean] = req_id;
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
tx_ring->ring_size);
}
@@ -911,7 +911,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- rx_ring->free_rx_ids[*next_to_clean] = req_id;
+ rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
rx_ring->ring_size);
return skb;
@@ -935,7 +935,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info->page = NULL;
- rx_ring->free_rx_ids[*next_to_clean] = req_id;
+ rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean =
ENA_RX_RING_IDX_NEXT(*next_to_clean,
rx_ring->ring_size);
@@ -1001,6 +1001,9 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
if (likely(ena_rx_ctx->l4_csum_checked)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.csum_good++;
+ u64_stats_update_end(&rx_ring->syncp);
} else {
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.csum_unchecked++;
@@ -1088,7 +1091,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
for (i = 0; i < ena_rx_ctx.descs; i++) {
- rx_ring->free_tx_ids[next_to_clean] =
+ rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
next_to_clean =
ENA_RX_RING_IDX_NEXT(next_to_clean,
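
The hunk above is the motivating bug for the rename: the RX cleanup path reached through the union by its TX alias (rx_ring->free_tx_ids), which the compiler accepted silently because both names addressed the same storage. With the union collapsed into a single free_ids member (see the ena_netdev.h hunk below), this class of mix-up can no longer be written.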
@@ -2152,7 +2155,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
+ req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
@@ -2172,6 +2175,13 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb);
+ if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
+ netif_dbg(adapter, tx_queued, dev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ qid);
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ }
+
/* prepare the packet's descriptors to dma engine */
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
@@ -2362,6 +2372,14 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
+static void ena_extra_properties_strings_destroy(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ ena_com_delete_extra_properties_strings(adapter->ena_dev);
+ adapter->ena_extra_properties_count = 0;
+}
+
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3266,23 +3284,21 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
- static int version_printed;
- struct net_device *netdev;
- struct ena_adapter *adapter;
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
- char *queue_type_str;
- static int adapters_found;
+ struct ena_adapter *adapter;
int io_queue_num, bars, rc;
- int queue_size;
+ struct net_device *netdev;
+ static int adapters_found;
+ char *queue_type_str;
u16 tx_sgl_size = 0;
u16 rx_sgl_size = 0;
+ int queue_size;
bool wd_state;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (version_printed++ == 0)
- dev_info(&pdev->dev, "%s", version);
+ dev_info_once(&pdev->dev, "%s", version);
rc = pci_enable_device_mem(pdev);
if (rc) {
@@ -3417,6 +3433,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
+ adapter->ena_extra_properties_count =
+ ena_com_extra_properties_strings_init(ena_dev);
+
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
@@ -3456,6 +3475,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_rss:
+ ena_extra_properties_strings_destroy(netdev);
ena_com_delete_debug_area(ena_dev);
ena_com_rss_destroy(ena_dev);
err_free_msix:
@@ -3522,6 +3542,8 @@ static void ena_remove(struct pci_dev *pdev)
ena_com_delete_host_info(ena_dev);
+ ena_extra_properties_strings_destroy(netdev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 63870072cbbd..ec111cfc59b2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -208,26 +208,24 @@ struct ena_stats_tx {
struct ena_stats_rx {
u64 cnt;
u64 bytes;
+ u64 rx_copybreak_pkt;
+ u64 csum_good;
u64 refil_partial;
u64 bad_csum;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
u64 bad_desc_num;
- u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring;
u64 csum_unchecked;
};
struct ena_ring {
- union {
- /* Holds the empty requests for TX/RX
- * out of order completions
- */
- u16 *free_tx_ids;
- u16 *free_rx_ids;
- };
+ /* Holds the empty requests for TX/RX
+ * out of order completions
+ */
+ u16 *free_ids;
union {
struct ena_tx_buffer *tx_buffer_info;
@@ -364,6 +362,8 @@ struct ena_adapter {
u32 last_monitored_tx_qid;
enum ena_regs_reset_reason_types reset_reason;
+
+ u8 ena_extra_properties_count;
};
void ena_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 953ff1f9ac70..0058051ba925 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on PCI
+ depends on (PCI || ATH79)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -17,6 +17,14 @@ config NET_VENDOR_ATHEROS
if NET_VENDOR_ATHEROS
+config AG71XX
+ tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
+ depends on ATH79
+ select PHYLIB
+ help
+ If you wish to compile a kernel for AR7XXX/AR9XXX and enable
+ Ethernet support, then you should always answer Y to this.
+
config ATL2
tristate "Atheros L2 Fast Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index aa3d394b87e6..aca696cb6425 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -3,6 +3,7 @@
# Makefile for the Atheros network device drivers.
#
+obj-$(CONFIG_AG71XX) += ag71xx.o
obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
new file mode 100644
index 000000000000..72a57c6cd254
--- /dev/null
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -0,0 +1,1898 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Atheros AR71xx built-in ethernet mac driver
+ *
+ * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
+ *
+ * List of authors contributed to this driver before mainlining:
+ * Alexander Couzens <lynxis@fe80.eu>
+ * Christian Lamparter <chunkeey@gmail.com>
+ * Chuanhong Guo <gch981213@gmail.com>
+ * Daniel F. Dickinson <cshored@thecshore.com>
+ * David Bauer <mail@david-bauer.net>
+ * Felix Fietkau <nbd@nbd.name>
+ * Gabor Juhos <juhosg@freemail.hu>
+ * Hauke Mehrtens <hauke@hauke-m.de>
+ * Johann Neuhauser <johann@it-neuhauser.de>
+ * John Crispin <john@phrozen.org>
+ * Jo-Philipp Wich <jo@mein.io>
+ * Koen Vandeputte <koen.vandeputte@ncentric.com>
+ * Lucian Cristian <lucian.cristian@gmail.com>
+ * Matt Merhar <mattmerhar@protonmail.com>
+ * Milan Krstic <milan.krstic@gmail.com>
+ * Petr Štetiar <ynezz@true.cz>
+ * Rosen Penev <rosenp@gmail.com>
+ * Stephen Walker <stephendwalker+github@gmail.com>
+ * Vittorio Gambaletta <openwrt@vittgam.net>
+ * Weijie Gao <hackpascal@gmail.com>
+ * Imre Kaloz <kaloz@openwrt.org>
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+/* For our NAPI weight, bigger does *NOT* mean better - it means more
+ * D-cache misses and lots more wasted cycles than we'll ever
+ * possibly gain from saving instructions.
+ */
+#define AG71XX_NAPI_WEIGHT 32
+#define AG71XX_OOM_REFILL (1 + HZ / 10)
+
+#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
+#define AG71XX_INT_TX (AG71XX_INT_TX_PS)
+#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
+
+#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
+#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)
+
+#define AG71XX_TX_MTU_LEN 1540
+
+#define AG71XX_TX_RING_SPLIT 512
+#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
+ AG71XX_TX_RING_SPLIT)
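+/* e.g. a full AG71XX_TX_MTU_LEN (1540 byte) frame split at 512-byte
+ * boundaries occupies DIV_ROUND_UP(1540, 512) = 4 descriptors
+ */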
+#define AG71XX_TX_RING_SIZE_DEFAULT 128
+#define AG71XX_RX_RING_SIZE_DEFAULT 256
+
+#define AG71XX_MDIO_RETRY 1000
+#define AG71XX_MDIO_DELAY 5
+#define AG71XX_MDIO_MAX_CLK 5000000
+
+/* Register offsets */
+#define AG71XX_REG_MAC_CFG1 0x0000
+#define MAC_CFG1_TXE BIT(0) /* Tx Enable */
+#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
+#define MAC_CFG1_RXE BIT(2) /* Rx Enable */
+#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
+#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
+#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
+#define MAC_CFG1_SR BIT(31) /* Soft Reset */
+#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
+ MAC_CFG1_SRX | MAC_CFG1_STX)
+
+#define AG71XX_REG_MAC_CFG2 0x0004
+#define MAC_CFG2_FDX BIT(0)
+#define MAC_CFG2_PAD_CRC_EN BIT(2)
+#define MAC_CFG2_LEN_CHECK BIT(4)
+#define MAC_CFG2_IF_1000 BIT(9)
+#define MAC_CFG2_IF_10_100 BIT(8)
+
+#define AG71XX_REG_MAC_MFL 0x0010
+
+#define AG71XX_REG_MII_CFG 0x0020
+#define MII_CFG_CLK_DIV_4 0
+#define MII_CFG_CLK_DIV_6 2
+#define MII_CFG_CLK_DIV_8 3
+#define MII_CFG_CLK_DIV_10 4
+#define MII_CFG_CLK_DIV_14 5
+#define MII_CFG_CLK_DIV_20 6
+#define MII_CFG_CLK_DIV_28 7
+#define MII_CFG_CLK_DIV_34 8
+#define MII_CFG_CLK_DIV_42 9
+#define MII_CFG_CLK_DIV_50 10
+#define MII_CFG_CLK_DIV_58 11
+#define MII_CFG_CLK_DIV_66 12
+#define MII_CFG_CLK_DIV_74 13
+#define MII_CFG_CLK_DIV_82 14
+#define MII_CFG_CLK_DIV_98 15
+#define MII_CFG_RESET BIT(31)
+
+#define AG71XX_REG_MII_CMD 0x0024
+#define MII_CMD_READ BIT(0)
+
+#define AG71XX_REG_MII_ADDR 0x0028
+#define MII_ADDR_SHIFT 8
+
+#define AG71XX_REG_MII_CTRL 0x002c
+#define AG71XX_REG_MII_STATUS 0x0030
+#define AG71XX_REG_MII_IND 0x0034
+#define MII_IND_BUSY BIT(0)
+#define MII_IND_INVALID BIT(2)
+
+#define AG71XX_REG_MAC_IFCTL 0x0038
+#define MAC_IFCTL_SPEED BIT(16)
+
+#define AG71XX_REG_MAC_ADDR1 0x0040
+#define AG71XX_REG_MAC_ADDR2 0x0044
+#define AG71XX_REG_FIFO_CFG0 0x0048
+#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */
+#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
+#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
+#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
+#define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
+#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
+ | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
+#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
+
+#define FIFO_CFG0_ENABLE_SHIFT 8
+
+#define AG71XX_REG_FIFO_CFG1 0x004c
+#define AG71XX_REG_FIFO_CFG2 0x0050
+#define AG71XX_REG_FIFO_CFG3 0x0054
+#define AG71XX_REG_FIFO_CFG4 0x0058
+#define FIFO_CFG4_DE BIT(0) /* Drop Event */
+#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */
+#define FIFO_CFG4_FC BIT(2) /* False Carrier */
+#define FIFO_CFG4_CE BIT(3) /* Code Error */
+#define FIFO_CFG4_CR BIT(4) /* CRC error */
+#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG4_LO BIT(6) /* Length out of range */
+#define FIFO_CFG4_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG4_DR BIT(10) /* Dribble */
+#define FIFO_CFG4_LE BIT(11) /* Long Event */
+#define FIFO_CFG4_CF BIT(12) /* Control Frame */
+#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
+#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
+ FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
+ FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
+ FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
+ FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
+ FIFO_CFG4_VT)
+
+#define AG71XX_REG_FIFO_CFG5 0x005c
+#define FIFO_CFG5_DE BIT(0) /* Drop Event */
+#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
+#define FIFO_CFG5_FC BIT(2) /* False Carrier */
+#define FIFO_CFG5_CE BIT(3) /* Code Error */
+#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(9) /* Dribble */
+#define FIFO_CFG5_CF BIT(10) /* Control Frame */
+#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(14) /* Long Event */
+#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
+#define FIFO_CFG5_16 BIT(16) /* unknown */
+#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_SF BIT(18) /* Short Frame */
+#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
+#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
+ FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
+ FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
+ FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
+ FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
+ FIFO_CFG5_17 | FIFO_CFG5_SF)
+
+#define AG71XX_REG_TX_CTRL 0x0180
+#define TX_CTRL_TXE BIT(0) /* Tx Enable */
+
+#define AG71XX_REG_TX_DESC 0x0184
+#define AG71XX_REG_TX_STATUS 0x0188
+#define TX_STATUS_PS BIT(0) /* Packet Sent */
+#define TX_STATUS_UR BIT(1) /* Tx Underrun */
+#define TX_STATUS_BE BIT(3) /* Bus Error */
+
+#define AG71XX_REG_RX_CTRL 0x018c
+#define RX_CTRL_RXE BIT(0) /* Rx Enable */
+
+#define AG71XX_DMA_RETRY 10
+#define AG71XX_DMA_DELAY 1
+
+#define AG71XX_REG_RX_DESC 0x0190
+#define AG71XX_REG_RX_STATUS 0x0194
+#define RX_STATUS_PR BIT(0) /* Packet Received */
+#define RX_STATUS_OF BIT(2) /* Rx Overflow */
+#define RX_STATUS_BE BIT(3) /* Bus Error */
+
+#define AG71XX_REG_INT_ENABLE 0x0198
+#define AG71XX_REG_INT_STATUS 0x019c
+#define AG71XX_INT_TX_PS BIT(0)
+#define AG71XX_INT_TX_UR BIT(1)
+#define AG71XX_INT_TX_BE BIT(3)
+#define AG71XX_INT_RX_PR BIT(4)
+#define AG71XX_INT_RX_OF BIT(6)
+#define AG71XX_INT_RX_BE BIT(7)
+
+#define AG71XX_REG_FIFO_DEPTH 0x01a8
+#define AG71XX_REG_RX_SM 0x01b0
+#define AG71XX_REG_TX_SM 0x01b4
+
+#define ETH_SWITCH_HEADER_LEN 2
+
+#define AG71XX_DEFAULT_MSG_ENABLE \
+ (NETIF_MSG_DRV \
+ | NETIF_MSG_PROBE \
+ | NETIF_MSG_LINK \
+ | NETIF_MSG_TIMER \
+ | NETIF_MSG_IFDOWN \
+ | NETIF_MSG_IFUP \
+ | NETIF_MSG_RX_ERR \
+ | NETIF_MSG_TX_ERR)
+
+#define DESC_EMPTY BIT(31)
+#define DESC_MORE BIT(24)
+#define DESC_PKTLEN_M 0xfff
+struct ag71xx_desc {
+ u32 data;
+ u32 ctrl;
+ u32 next;
+ u32 pad;
+} __aligned(4);
+
+#define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \
+ L1_CACHE_BYTES)
+
+struct ag71xx_buf {
+ union {
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ } tx;
+ struct {
+ dma_addr_t dma_addr;
+ void *rx_buf;
+ } rx;
+ };
+};
+
+struct ag71xx_ring {
+ /* "Hot" fields in the data path. */
+ unsigned int curr;
+ unsigned int dirty;
+
+ /* "Cold" fields - not used in the data path. */
+ struct ag71xx_buf *buf;
+ u16 order;
+ u16 desc_split;
+ dma_addr_t descs_dma;
+ u8 *descs_cpu;
+};
+
+enum ag71xx_type {
+ AR7100,
+ AR7240,
+ AR9130,
+ AR9330,
+ AR9340,
+ QCA9530,
+ QCA9550,
+};
+
+struct ag71xx_dcfg {
+ u32 max_frame_len;
+ const u32 *fifodata;
+ u16 desc_pktlen_mask;
+ bool tx_hang_workaround;
+ enum ag71xx_type type;
+};
+
+struct ag71xx {
+ /* Critical data related to the per-packet data path are clustered
+ * early in this structure to help improve the D-cache footprint.
+ */
+ struct ag71xx_ring rx_ring ____cacheline_aligned;
+ struct ag71xx_ring tx_ring ____cacheline_aligned;
+
+ u16 rx_buf_size;
+ u8 rx_buf_offset;
+
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ u32 msg_enable;
+ const struct ag71xx_dcfg *dcfg;
+
+ /* From this point onwards we're not looking at per-packet fields. */
+ void __iomem *mac_base;
+
+ struct ag71xx_desc *stop_desc;
+ dma_addr_t stop_desc_dma;
+
+ int phy_if_mode;
+
+ struct delayed_work restart_work;
+ struct timer_list oom_timer;
+
+ struct reset_control *mac_reset;
+
+ u32 fifodata[3];
+ int mac_idx;
+
+ struct reset_control *mdio_reset;
+ struct mii_bus *mii_bus;
+ struct clk *clk_mdio;
+ struct clk *clk_eth;
+};
+
+static int ag71xx_desc_empty(struct ag71xx_desc *desc)
+{
+ return (desc->ctrl & DESC_EMPTY) != 0;
+}
+
+static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
+{
+ return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
+}
+
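+/* Ring sizes are powers of two; e.g. the default RX ring of 256
+ * descriptors has order fls(255) = 8
+ */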
+static int ag71xx_ring_size_order(int size)
+{
+ return fls(size - 1);
+}
+
+static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
+{
+ return ag->dcfg->type == type;
+}
+
+static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
+{
+ iowrite32(value, ag->mac_base + reg);
+ /* flush write */
+ (void)ioread32(ag->mac_base + reg);
+}
+
+static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
+{
+ return ioread32(ag->mac_base + reg);
+}
+
+static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
+{
+ void __iomem *r;
+
+ r = ag->mac_base + reg;
+ iowrite32(ioread32(r) | mask, r);
+ /* flush write */
+ (void)ioread32(r);
+}
+
+static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
+{
+ void __iomem *r;
+
+ r = ag->mac_base + reg;
+ iowrite32(ioread32(r) & ~mask, r);
+ /* flush write */
+ (void)ioread32(r);
+}
+
+static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
+{
+ ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
+}
+
+static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
+{
+ ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
+}
+
+static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ int i;
+
+ for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
+ u32 busy;
+
+ udelay(AG71XX_MDIO_DELAY);
+
+ busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
+ if (!busy)
+ return 0;
+
+ udelay(AG71XX_MDIO_DELAY);
+ }
+
+ netif_err(ag, link, ndev, "MDIO operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct ag71xx *ag = bus->priv;
+ int err, val;
+
+ err = ag71xx_mdio_wait_busy(ag);
+ if (err)
+ return err;
+
+ ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
+ ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
+ /* enable read mode */
+ ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
+
+ err = ag71xx_mdio_wait_busy(ag);
+ if (err)
+ return err;
+
+ val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
+ /* disable read mode */
+ ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
+
+ netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
+ addr, reg, val);
+
+ return val;
+}
+
+static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct ag71xx *ag = bus->priv;
+
+ netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
+ addr, reg, val);
+
+ ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
+ ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
+ ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
+
+ return ag71xx_mdio_wait_busy(ag);
+}
+
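+/* MDIO clock divider tables: the index is the value written into the
+ * MII_CFG divider field, the entry is the reference-clock divisor it
+ * selects.
+ */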
+static const u32 ar71xx_mdio_div_table[] = {
+ 4, 4, 6, 8, 10, 14, 20, 28,
+};
+
+static const u32 ar7240_mdio_div_table[] = {
+ 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
+};
+
+static const u32 ar933x_mdio_div_table[] = {
+ 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
+};
+
+static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
+{
+ unsigned long ref_clock;
+ const u32 *table;
+ int ndivs, i;
+
+ ref_clock = clk_get_rate(ag->clk_mdio);
+ if (!ref_clock)
+ return -EINVAL;
+
+ if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
+ table = ar933x_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
+ } else if (ag71xx_is(ag, AR7240)) {
+ table = ar7240_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
+ } else {
+ table = ar71xx_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
+ }
+
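+ /* Pick the first divider that keeps the MDC clock at or below the maximum. */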
+ for (i = 0; i < ndivs; i++) {
+ unsigned long t;
+
+ t = ref_clock / table[i];
+ if (t <= AG71XX_MDIO_MAX_CLK) {
+ *div = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int ag71xx_mdio_reset(struct mii_bus *bus)
+{
+ struct ag71xx *ag = bus->priv;
+ int err;
+ u32 t;
+
+ err = ag71xx_mdio_get_divider(ag, &t);
+ if (err)
+ return err;
+
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
+ usleep_range(100, 200);
+
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int ag71xx_mdio_probe(struct ag71xx *ag)
+{
+ struct device *dev = &ag->pdev->dev;
+ struct net_device *ndev = ag->ndev;
+ struct mii_bus *mii_bus;
+ struct device_node *np;
+ int err;
+
+ np = dev->of_node;
+ ag->mii_bus = NULL;
+
+ ag->clk_mdio = devm_clk_get(dev, "mdio");
+ if (IS_ERR(ag->clk_mdio)) {
+ netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
+ return PTR_ERR(ag->clk_mdio);
+ }
+
+ err = clk_prepare_enable(ag->clk_mdio);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
+ return err;
+ }
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus) {
+ err = -ENOMEM;
+ goto mdio_err_put_clk;
+ }
+
+ ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
+ if (IS_ERR(ag->mdio_reset)) {
+ netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
+ return PTR_ERR(ag->mdio_reset);
+ }
+
+ mii_bus->name = "ag71xx_mdio";
+ mii_bus->read = ag71xx_mdio_mii_read;
+ mii_bus->write = ag71xx_mdio_mii_write;
+ mii_bus->reset = ag71xx_mdio_reset;
+ mii_bus->priv = ag;
+ mii_bus->parent = dev;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
+
+ reset_control_assert(ag->mdio_reset);
+ msleep(100);
+ reset_control_deassert(ag->mdio_reset);
+ msleep(200);
+
+ err = of_mdiobus_register(mii_bus, np);
+ if (err)
+ goto mdio_err_put_clk;
+
+ ag->mii_bus = mii_bus;
+
+ return 0;
+
+mdio_err_put_clk:
+ clk_disable_unprepare(ag->clk_mdio);
+ return err;
+}
+
+static void ag71xx_mdio_remove(struct ag71xx *ag)
+{
+ if (ag->mii_bus)
+ mdiobus_unregister(ag->mii_bus);
+ clk_disable_unprepare(ag->clk_mdio);
+}
+
+static void ag71xx_hw_stop(struct ag71xx *ag)
+{
+ /* disable all interrupts and stop the rx/tx engine */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+}
+
+static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
+{
+ unsigned long timestamp;
+ u32 rx_sm, tx_sm, rx_fd;
+
+ timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ if (likely(time_before(jiffies, timestamp + HZ / 10)))
+ return false;
+
+ if (!netif_carrier_ok(ag->ndev))
+ return false;
+
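+ /* These are magic RX/TX state-machine signatures that indicate a
+ * stuck DMA engine; the values do not appear to be documented.
+ */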
+ rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
+ if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
+ return true;
+
+ tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
+ rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
+ if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
+ ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
+ return true;
+
+ return false;
+}
+
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int sent = 0, bytes_compl = 0, n = 0;
+ struct net_device *ndev = ag->ndev;
+ int ring_mask, ring_size;
+ bool dma_stuck = false;
+
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
+
+ while (ring->dirty + n != ring->curr) {
+ struct ag71xx_desc *desc;
+ struct sk_buff *skb;
+ unsigned int i;
+
+ i = (ring->dirty + n) & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+ skb = ring->buf[i].tx.skb;
+
+ if (!flush && !ag71xx_desc_empty(desc)) {
+ if (ag->dcfg->tx_hang_workaround &&
+ ag71xx_check_dma_stuck(ag)) {
+ schedule_delayed_work(&ag->restart_work,
+ HZ / 2);
+ dma_stuck = true;
+ }
+ break;
+ }
+
+ if (flush)
+ desc->ctrl |= DESC_EMPTY;
+
+ n++;
+ if (!skb)
+ continue;
+
+ dev_kfree_skb_any(skb);
+ ring->buf[i].tx.skb = NULL;
+
+ bytes_compl += ring->buf[i].tx.len;
+
+ sent++;
+ ring->dirty += n;
+
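+ /* Ack one "packet sent" status per descriptor consumed by this packet. */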
+ while (n > 0) {
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ n--;
+ }
+ }
+
+ netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
+
+ if (!sent)
+ return 0;
+
+ ag->ndev->stats.tx_bytes += bytes_compl;
+ ag->ndev->stats.tx_packets += sent;
+
+ netdev_completed_queue(ag->ndev, sent, bytes_compl);
+ if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
+ netif_wake_queue(ag->ndev);
+
+ if (!dma_stuck)
+ cancel_delayed_work(&ag->restart_work);
+
+ return sent;
+}
+
+static void ag71xx_dma_wait_stop(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ int i;
+
+ for (i = 0; i < AG71XX_DMA_RETRY; i++) {
+ u32 rx, tx;
+
+ mdelay(AG71XX_DMA_DELAY);
+
+ rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
+ tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
+ if (!rx && !tx)
+ return;
+ }
+
+ netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
+}
+
+static void ag71xx_dma_reset(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ u32 val;
+ int i;
+
+ /* stop RX and TX */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+
+ /* Give the hardware some time to really stop all rx/tx activity;
+ * clearing the descriptors too early causes random memory corruption.
+ */
+ ag71xx_dma_wait_stop(ag);
+
+ /* clear descriptor addresses */
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
+
+ /* clear pending RX/TX interrupts */
+ for (i = 0; i < 256; i++) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ }
+
+ /* clear pending errors */
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
+
+ val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
+ if (val)
+ netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
+ val);
+
+ val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
+
+ /* mask out reserved bits */
+ val &= ~0xff000000;
+
+ if (val)
+ netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
+ val);
+}
+
+static void ag71xx_hw_setup(struct ag71xx *ag)
+{
+ u32 init = MAC_CFG1_INIT;
+
+ /* setup MAC configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
+ MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
+
+ /* zero the max frame length for now; the real value is set at open time */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
+
+ /* setup FIFO configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
+}
+
+static unsigned int ag71xx_max_frame_len(unsigned int mtu)
+{
+ return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
+}
+
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+{
+ u32 t;
+
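+ /* MAC_ADDR1 holds octets 2..5; MAC_ADDR2 holds octets 0..1 in its
+ * upper 16 bits.
+ */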
+ t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
+ | (((u32)mac[3]) << 8) | ((u32)mac[2]);
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
+
+ t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
+}
+
+static void ag71xx_fast_reset(struct ag71xx *ag)
+{
+ struct net_device *dev = ag->ndev;
+ u32 rx_ds;
+ u32 mii_reg;
+
+ ag71xx_hw_stop(ag);
+
+ mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
+ rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
+
+ ag71xx_tx_packets(ag, true);
+
+ reset_control_assert(ag->mac_reset);
+ usleep_range(10, 20);
+ reset_control_deassert(ag->mac_reset);
+ usleep_range(10, 20);
+
+ ag71xx_dma_reset(ag);
+ ag71xx_hw_setup(ag);
+ ag->tx_ring.curr = 0;
+ ag->tx_ring.dirty = 0;
+ netdev_reset_queue(ag->ndev);
+
+ /* setup max frame length */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
+ ag71xx_max_frame_len(ag->ndev->mtu));
+
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
+
+ ag71xx_hw_set_macaddr(ag, dev->dev_addr);
+}
+
+static void ag71xx_hw_start(struct ag71xx *ag)
+{
+ /* start RX engine */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+
+ /* enable interrupts */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
+
+ netif_wake_queue(ag->ndev);
+}
+
+static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
+{
+ struct phy_device *phydev = ag->ndev->phydev;
+ u32 cfg2;
+ u32 ifctl;
+ u32 fifo5;
+
+ if (!phydev->link && update) {
+ ag71xx_hw_stop(ag);
+ return;
+ }
+
+ if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
+ ag71xx_fast_reset(ag);
+
+ cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
+ cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
+ cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;
+
+ ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
+ ifctl &= ~(MAC_IFCTL_SPEED);
+
+ fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
+ fifo5 &= ~FIFO_CFG5_BM;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ cfg2 |= MAC_CFG2_IF_1000;
+ fifo5 |= FIFO_CFG5_BM;
+ break;
+ case SPEED_100:
+ cfg2 |= MAC_CFG2_IF_10_100;
+ ifctl |= MAC_IFCTL_SPEED;
+ break;
+ case SPEED_10:
+ cfg2 |= MAC_CFG2_IF_10_100;
+ break;
+ default:
+ WARN(1, "not supported speed %i\n", phydev->speed);
+ return;
+ }
+
+ if (ag->tx_ring.desc_split) {
+ ag->fifodata[2] &= 0xffff;
+ ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
+ ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
+
+ ag71xx_hw_start(ag);
+
+ if (update)
+ phy_print_status(phydev);
+}
+
+static void ag71xx_phy_link_adjust(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ ag71xx_link_adjust(ag, true);
+}
+
+static int ag71xx_phy_connect(struct ag71xx *ag)
+{
+ struct device_node *np = ag->pdev->dev.of_node;
+ struct net_device *ndev = ag->ndev;
+ struct device_node *phy_node;
+ struct phy_device *phydev;
+ int ret;
+
+ if (of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
+ ret);
+ return ret;
+ }
+
+ phy_node = of_node_get(np);
+ } else {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ }
+
+ if (!phy_node) {
+ netif_err(ag, probe, ndev, "Could not find valid phy node\n");
+ return -ENODEV;
+ }
+
+ phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
+ 0, ag->phy_if_mode);
+
+ of_node_put(phy_node);
+
+ if (!phydev) {
+ netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
+ return -ENODEV;
+ }
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void ag71xx_ring_tx_clean(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ u32 bytes_compl = 0, pkts_compl = 0;
+ struct net_device *ndev = ag->ndev;
+
+ while (ring->curr != ring->dirty) {
+ struct ag71xx_desc *desc;
+ u32 i = ring->dirty & ring_mask;
+
+ desc = ag71xx_ring_desc(ring, i);
+ if (!ag71xx_desc_empty(desc)) {
+ desc->ctrl = 0;
+ ndev->stats.tx_errors++;
+ }
+
+ if (ring->buf[i].tx.skb) {
+ bytes_compl += ring->buf[i].tx.len;
+ pkts_compl++;
+ dev_kfree_skb_any(ring->buf[i].tx.skb);
+ }
+ ring->buf[i].tx.skb = NULL;
+ ring->dirty++;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+}
+
+static void ag71xx_ring_tx_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = ring_size - 1;
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32)(ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
+
+ desc->ctrl = DESC_EMPTY;
+ ring->buf[i].tx.skb = NULL;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ ring->curr = 0;
+ ring->dirty = 0;
+ netdev_reset_queue(ag->ndev);
+}
+
+static void ag71xx_ring_rx_clean(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
+ int i;
+
+ if (!ring->buf)
+ return;
+
+ for (i = 0; i < ring_size; i++)
+ if (ring->buf[i].rx.rx_buf) {
+ dma_unmap_single(&ag->pdev->dev,
+ ring->buf[i].rx.dma_addr,
+ ag->rx_buf_size, DMA_FROM_DEVICE);
+ skb_free_frag(ring->buf[i].rx.rx_buf);
+ }
+}
+
+static int ag71xx_buffer_size(struct ag71xx *ag)
+{
+ return ag->rx_buf_size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
+ int offset,
+ void *(*alloc)(unsigned int size))
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct ag71xx_desc *desc;
+ void *data;
+
+ desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
+
+ data = alloc(ag71xx_buffer_size(ag));
+ if (!data)
+ return false;
+
+ buf->rx.rx_buf = data;
+ buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
+ DMA_FROM_DEVICE);
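+ /* Point the DMA address past the headroom reserved for the stack. */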
+ desc->data = (u32)buf->rx.dma_addr + offset;
+ return true;
+}
+
+static int ag71xx_ring_rx_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct net_device *ndev = ag->ndev;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
+ unsigned int i;
+ int ret;
+
+ ret = 0;
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32)(ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
+
+ netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
+ desc, desc->next);
+ }
+
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
+ netdev_alloc_frag)) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ desc->ctrl = DESC_EMPTY;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ ring->curr = 0;
+ ring->dirty = 0;
+
+ return ret;
+}
+
+static int ag71xx_ring_rx_refill(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ int offset = ag->rx_buf_offset;
+ unsigned int count;
+
+ count = 0;
+ for (; ring->curr - ring->dirty > 0; ring->dirty++) {
+ struct ag71xx_desc *desc;
+ unsigned int i;
+
+ i = ring->dirty & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ if (!ring->buf[i].rx.rx_buf &&
+ !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
+ napi_alloc_frag))
+ break;
+
+ desc->ctrl = DESC_EMPTY;
+ count++;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
+ count);
+
+ return count;
+}
+
+static int ag71xx_rings_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size, tx_size;
+
+ ring_size = BIT(tx->order) + BIT(rx->order);
+ tx_size = BIT(tx->order);
+
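+ /* TX and RX share one buffer array and one coherent descriptor block;
+ * the RX portions are carved out of the TX allocations below.
+ */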
+ tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
+ if (!tx->buf)
+ return -ENOMEM;
+
+ tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
+ ring_size * AG71XX_DESC_SIZE,
+ &tx->descs_dma, GFP_ATOMIC);
+ if (!tx->descs_cpu) {
+ kfree(tx->buf);
+ tx->buf = NULL;
+ return -ENOMEM;
+ }
+
+ rx->buf = &tx->buf[BIT(tx->order)];
+ rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
+ rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
+
+ ag71xx_ring_tx_init(ag);
+ return ag71xx_ring_rx_init(ag);
+}
+
+static void ag71xx_rings_free(struct ag71xx *ag)
+{
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size;
+
+ ring_size = BIT(tx->order) + BIT(rx->order);
+
+ if (tx->descs_cpu)
+ dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
+ tx->descs_cpu, tx->descs_dma);
+
+ kfree(tx->buf);
+
+ tx->descs_cpu = NULL;
+ rx->descs_cpu = NULL;
+ tx->buf = NULL;
+ rx->buf = NULL;
+}
+
+static void ag71xx_rings_cleanup(struct ag71xx *ag)
+{
+ ag71xx_ring_rx_clean(ag);
+ ag71xx_ring_tx_clean(ag);
+ ag71xx_rings_free(ag);
+
+ netdev_reset_queue(ag->ndev);
+}
+
+static void ag71xx_hw_init(struct ag71xx *ag)
+{
+ ag71xx_hw_stop(ag);
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
+ usleep_range(20, 30);
+
+ reset_control_assert(ag->mac_reset);
+ msleep(100);
+ reset_control_deassert(ag->mac_reset);
+ msleep(200);
+
+ ag71xx_hw_setup(ag);
+
+ ag71xx_dma_reset(ag);
+}
+
+static int ag71xx_hw_enable(struct ag71xx *ag)
+{
+ int ret;
+
+ ret = ag71xx_rings_init(ag);
+ if (ret)
+ return ret;
+
+ napi_enable(&ag->napi);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
+ netif_start_queue(ag->ndev);
+
+ return 0;
+}
+
+static void ag71xx_hw_disable(struct ag71xx *ag)
+{
+ netif_stop_queue(ag->ndev);
+
+ ag71xx_hw_stop(ag);
+ ag71xx_dma_reset(ag);
+
+ napi_disable(&ag->napi);
+ del_timer_sync(&ag->oom_timer);
+
+ ag71xx_rings_cleanup(ag);
+}
+
+static int ag71xx_open(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+ unsigned int max_frame_len;
+ int ret;
+
+ max_frame_len = ag71xx_max_frame_len(ndev->mtu);
+ ag->rx_buf_size =
+ SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
+
+ /* setup max frame length */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
+ ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
+
+ ret = ag71xx_hw_enable(ag);
+ if (ret)
+ goto err;
+
+ ret = ag71xx_phy_connect(ag);
+ if (ret)
+ goto err;
+
+ phy_start(ndev->phydev);
+
+ return 0;
+
+err:
+ ag71xx_rings_cleanup(ag);
+ return ret;
+}
+
+static int ag71xx_stop(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ ag71xx_hw_disable(ag);
+
+ return 0;
+}
+
+static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
+{
+ int i, ring_mask, ndesc, split;
+ struct ag71xx_desc *desc;
+
+ ring_mask = BIT(ring->order) - 1;
+ ndesc = 0;
+ split = ring->desc_split;
+
+ if (!split)
+ split = len;
+
+ while (len > 0) {
+ unsigned int cur_len = len;
+
+ i = (ring->curr + ndesc) & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ if (!ag71xx_desc_empty(desc))
+ return -1;
+
+ if (cur_len > split) {
+ cur_len = split;
+
+ /* TX will hang if DMA transfers <= 4 bytes; make sure the
+ * next segment is more than 4 bytes long.
+ */
+ if (len <= split + 4)
+ cur_len -= 4;
+ }
+
+ desc->data = addr;
+ addr += cur_len;
+ len -= cur_len;
+
+ if (len > 0)
+ cur_len |= DESC_MORE;
+
+ /* prevent early tx attempt of this descriptor */
+ if (!ndesc)
+ cur_len |= DESC_EMPTY;
+
+ desc->ctrl = cur_len;
+ ndesc++;
+ }
+
+ return ndesc;
+}
+
+static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int i, n, ring_min, ring_mask, ring_size;
+ struct ag71xx *ag = netdev_priv(ndev);
+ struct ag71xx_ring *ring;
+ struct ag71xx_desc *desc;
+ dma_addr_t dma_addr;
+
+ ring = &ag->tx_ring;
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ if (skb->len <= 4) {
+ netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
+ goto err_drop;
+ }
+
+ dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ i = ring->curr & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ /* setup descriptor fields */
+ n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
+ skb->len & ag->dcfg->desc_pktlen_mask);
+ if (n < 0)
+ goto err_drop_unmap;
+
+ i = (ring->curr + n - 1) & ring_mask;
+ ring->buf[i].tx.len = skb->len;
+ ring->buf[i].tx.skb = skb;
+
+ netdev_sent_queue(ndev, skb->len);
+
+ skb_tx_timestamp(skb);
+
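+ /* Clearing EMPTY on the first descriptor hands the whole chain
+ * over to the DMA engine.
+ */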
+ desc->ctrl &= ~DESC_EMPTY;
+ ring->curr += n;
+
+ /* flush descriptor */
+ wmb();
+
+ ring_min = 2;
+ if (ring->desc_split)
+ ring_min *= AG71XX_TX_RING_DS_PER_PKT;
+
+ if (ring->curr - ring->dirty >= ring_size - ring_min) {
+ netif_dbg(ag, tx_err, ndev, "tx queue full\n");
+ netif_stop_queue(ndev);
+ }
+
+ netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
+
+ /* enable TX engine */
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
+
+ return NETDEV_TX_OK;
+
+err_drop_unmap:
+ dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+err_drop:
+ ndev->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!ndev->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static void ag71xx_oom_timer_handler(struct timer_list *t)
+{
+ struct ag71xx *ag = from_timer(ag, t, oom_timer);
+
+ napi_schedule(&ag->napi);
+}
+
+static void ag71xx_tx_timeout(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ netif_err(ag, tx_err, ndev, "tx timeout\n");
+
+ schedule_delayed_work(&ag->restart_work, 1);
+}
+
+static void ag71xx_restart_work_func(struct work_struct *work)
+{
+ struct ag71xx *ag = container_of(work, struct ag71xx,
+ restart_work.work);
+ struct net_device *ndev = ag->ndev;
+
+ rtnl_lock();
+ ag71xx_hw_disable(ag);
+ ag71xx_hw_enable(ag);
+ if (ndev->phydev->link)
+ ag71xx_link_adjust(ag, false);
+ rtnl_unlock();
+}
+
+static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
+{
+ struct net_device *ndev = ag->ndev;
+ int ring_mask, ring_size, done = 0;
+ unsigned int pktlen_mask, offset;
+ struct sk_buff *next, *skb;
+ struct ag71xx_ring *ring;
+ struct list_head rx_list;
+
+ ring = &ag->rx_ring;
+ pktlen_mask = ag->dcfg->desc_pktlen_mask;
+ offset = ag->rx_buf_offset;
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
+ limit, ring->curr, ring->dirty);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ while (done < limit) {
+ unsigned int i = ring->curr & ring_mask;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+ int pktlen;
+ int err = 0;
+
+ if (ag71xx_desc_empty(desc))
+ break;
+
+ if ((ring->dirty + ring_size) == ring->curr) {
+ WARN_ONCE(1, "RX out of ring");
+ break;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
+
+ pktlen = desc->ctrl & pktlen_mask;
+ pktlen -= ETH_FCS_LEN;
+
+ dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
+ ag->rx_buf_size, DMA_FROM_DEVICE);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += pktlen;
+
+ skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ if (!skb) {
+ skb_free_frag(ring->buf[i].rx.rx_buf);
+ goto next;
+ }
+
+ skb_reserve(skb, offset);
+ skb_put(skb, pktlen);
+
+ if (err) {
+ ndev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ list_add_tail(&skb->list, &rx_list);
+ }
+
+next:
+ ring->buf[i].rx.rx_buf = NULL;
+ done++;
+
+ ring->curr++;
+ }
+
+ ag71xx_ring_rx_refill(ag);
+
+ list_for_each_entry_safe(skb, next, &rx_list, list)
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb_list(&rx_list);
+
+ netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
+ ring->curr, ring->dirty, done);
+
+ return done;
+}
+
+static int ag71xx_poll(struct napi_struct *napi, int limit)
+{
+ struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
+ struct ag71xx_ring *rx_ring = &ag->rx_ring;
+ int rx_ring_size = BIT(rx_ring->order);
+ struct net_device *ndev = ag->ndev;
+ int tx_done, rx_done;
+ u32 status;
+
+ tx_done = ag71xx_tx_packets(ag, false);
+
+ netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
+ rx_done = ag71xx_rx_packets(ag, limit);
+
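+ /* If the refill failed to allocate a buffer, back off and retry
+ * from the OOM timer instead of busy-polling.
+ */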
+ if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
+ goto oom;
+
+ status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
+ if (unlikely(status & RX_STATUS_OF)) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
+ ndev->stats.rx_fifo_errors++;
+
+ /* restart RX */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+ }
+
+ if (rx_done < limit) {
+ if (status & RX_STATUS_PR)
+ goto more;
+
+ status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
+ if (status & TX_STATUS_PS)
+ goto more;
+
+ netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
+ rx_done, tx_done, limit);
+
+ napi_complete(napi);
+
+ /* enable interrupts */
+ ag71xx_int_enable(ag, AG71XX_INT_POLL);
+ return rx_done;
+ }
+
+more:
+ netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
+ rx_done, tx_done, limit);
+ return limit;
+
+oom:
+ netif_err(ag, rx_err, ndev, "out of memory\n");
+
+ mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
+ napi_complete(napi);
+ return 0;
+}
+
+static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ag71xx *ag;
+ u32 status;
+
+ ag = netdev_priv(ndev);
+ status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ if (unlikely(status & AG71XX_INT_ERR)) {
+ if (status & AG71XX_INT_TX_BE) {
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
+ netif_err(ag, intr, ndev, "TX BUS error\n");
+ }
+ if (status & AG71XX_INT_RX_BE) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
+ netif_err(ag, intr, ndev, "RX BUS error\n");
+ }
+ }
+
+ if (likely(status & AG71XX_INT_POLL)) {
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
+ netif_dbg(ag, intr, ndev, "enable polling mode\n");
+ napi_schedule(&ag->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ ndev->mtu = new_mtu;
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
+ ag71xx_max_frame_len(ndev->mtu));
+
+ return 0;
+}
+
+static const struct net_device_ops ag71xx_netdev_ops = {
+ .ndo_open = ag71xx_open,
+ .ndo_stop = ag71xx_stop,
+ .ndo_start_xmit = ag71xx_hard_start_xmit,
+ .ndo_do_ioctl = ag71xx_do_ioctl,
+ .ndo_tx_timeout = ag71xx_tx_timeout,
+ .ndo_change_mtu = ag71xx_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const u32 ar71xx_addr_ar7100[] = {
+ 0x19000000, 0x1a000000,
+};
+
+static int ag71xx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct ag71xx_dcfg *dcfg;
+ struct net_device *ndev;
+ struct resource *res;
+ const void *mac_addr;
+ int tx_size, err, i;
+ struct ag71xx *ag;
+
+ if (!np)
+ return -ENODEV;
+
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
+ if (!ndev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ dcfg = of_device_get_match_data(&pdev->dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ ag = netdev_priv(ndev);
+ ag->mac_idx = -1;
+ for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
+ if (ar71xx_addr_ar7100[i] == res->start)
+ ag->mac_idx = i;
+ }
+
+ if (ag->mac_idx < 0) {
+ netif_err(ag, probe, ndev, "unknown mac idx\n");
+ return -EINVAL;
+ }
+
+ ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
+ if (IS_ERR(ag->clk_eth)) {
+ netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
+ return PTR_ERR(ag->clk_eth);
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ag->pdev = pdev;
+ ag->ndev = ndev;
+ ag->dcfg = dcfg;
+ ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
+ memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
+
+ ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
+ if (IS_ERR(ag->mac_reset)) {
+ netif_err(ag, probe, ndev, "missing mac reset\n");
+ err = PTR_ERR(ag->mac_reset);
+ goto err_free;
+ }
+
+ ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ag->mac_base) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
+ 0x0, dev_name(&pdev->dev), ndev);
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
+ ndev->irq);
+ goto err_free;
+ }
+
+ ndev->netdev_ops = &ag71xx_netdev_ops;
+
+ INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
+ timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
+
+ tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
+
+ ndev->min_mtu = 68;
+ ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
+
+ ag->rx_buf_offset = NET_SKB_PAD;
+ if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
+ ag->rx_buf_offset += NET_IP_ALIGN;
+
+ if (ag71xx_is(ag, AR7100)) {
+ ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
+ tx_size *= AG71XX_TX_RING_DS_PER_PKT;
+ }
+ ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
+
+ ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct ag71xx_desc),
+ &ag->stop_desc_dma, GFP_KERNEL);
+ if (!ag->stop_desc) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
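+ /* The stop descriptor points to itself so a halted DMA engine
+ * parks on it harmlessly.
+ */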
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+ ag->stop_desc->next = (u32)ag->stop_desc_dma;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!mac_addr || !is_valid_ether_addr(ndev->dev_addr)) {
+ netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
+ eth_random_addr(ndev->dev_addr);
+ }
+
+ ag->phy_if_mode = of_get_phy_mode(np);
+ if (ag->phy_if_mode < 0) {
+ netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
+ err = ag->phy_if_mode;
+ goto err_free;
+ }
+
+ netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+
+ err = clk_prepare_enable(ag->clk_eth);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
+ goto err_free;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
+
+ ag71xx_hw_init(ag);
+
+ err = ag71xx_mdio_probe(ag);
+ if (err)
+ goto err_put_clk;
+
+ platform_set_drvdata(pdev, ndev);
+
+ err = register_netdev(ndev);
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to register net device\n");
+ platform_set_drvdata(pdev, NULL);
+ goto err_mdio_remove;
+ }
+
+ netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
+ (unsigned long)ag->mac_base, ndev->irq,
+ phy_modes(ag->phy_if_mode));
+
+ return 0;
+
+err_mdio_remove:
+ ag71xx_mdio_remove(ag);
+err_put_clk:
+ clk_disable_unprepare(ag->clk_eth);
+err_free:
+ /* ndev was allocated with devm_alloc_etherdev(); the core frees it */
+ return err;
+}
+
+static int ag71xx_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ag71xx *ag;
+
+ if (!ndev)
+ return 0;
+
+ ag = netdev_priv(ndev);
+ unregister_netdev(ndev);
+ ag71xx_mdio_remove(ag);
+ clk_disable_unprepare(ag->clk_eth);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const u32 ar71xx_fifo_ar7100[] = {
+ 0x0fff0000, 0x00001fff, 0x00780fff,
+};
+
+static const u32 ar71xx_fifo_ar9130[] = {
+ 0x0fff0000, 0x00001fff, 0x008001ff,
+};
+
+static const u32 ar71xx_fifo_ar9330[] = {
+ 0x0010ffff, 0x015500aa, 0x01f00140,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
+ .type = AR7100,
+ .fifodata = ar71xx_fifo_ar7100,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = false,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
+ .type = AR7240,
+ .fifodata = ar71xx_fifo_ar7100,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
+ .type = AR9130,
+ .fifodata = ar71xx_fifo_ar9130,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = false,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
+ .type = AR9330,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
+ .type = AR9340,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = SZ_16K - 1,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
+ .type = QCA9530,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = SZ_16K - 1,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
+ .type = QCA9550,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct of_device_id ag71xx_match[] = {
+ { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
+ { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
+ { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
+ { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
+ { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
+ { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
+ { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
+ {}
+};
+
+static struct platform_driver ag71xx_driver = {
+ .probe = ag71xx_probe,
+ .remove = ag71xx_remove,
+ .driver = {
+ .name = "ag71xx",
+ .of_match_table = ag71xx_match,
+ }
+};
+
+module_platform_driver(ag71xx_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a8fe0808823d..7c06e2aebc9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -280,6 +280,7 @@ struct tp_params {
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 filter_mask;
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
/* cached TP_OUT_CONFIG compressed error vector
@@ -600,6 +601,7 @@ struct port_info {
u8 vin;
u8 vivld;
u8 smt_idx;
+ u8 rx_cchan;
};
struct dentry;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 4107007b6ec4..6232236d7abc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -248,8 +248,9 @@ static int validate_filter(struct net_device *dev,
u32 fconf, iconf;
/* Check for unconfigured fields being used. */
- fconf = adapter->params.tp.vlan_pri_map;
iconf = adapter->params.tp.ingress_config;
+ fconf = fs->hash ? adapter->params.tp.filter_mask :
+ adapter->params.tp.vlan_pri_map;
if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
@@ -1041,7 +1042,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
- RX_CHANNEL_F |
+ RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
@@ -1081,7 +1082,7 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
- RX_CHANNEL_F |
+ RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
@@ -1833,24 +1834,38 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
}
}
-int init_hash_filter(struct adapter *adap)
+void init_hash_filter(struct adapter *adap)
{
+ u32 reg;
+
/* On T6, verify the necessary register configs and warn the user in
* case of improper config
*/
if (is_t6(adap->params.chip)) {
- if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
- goto err;
+ if (is_offload(adap)) {
+ if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
+ & ACTIVEFILTERCOUNTS_F)) {
+ dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
+ return;
+ }
+ } else {
+ reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
+ if (TCAM_ACTV_HIT_G(reg) != 4) {
+ dev_err(adap->pdev_dev, "Invalid hash filter config\n");
+ return;
+ }
+
+ reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
+ if (HASH_ACTV_HIT_G(reg) != 4) {
+ dev_err(adap->pdev_dev, "Invalid hash filter config\n");
+ return;
+ }
+ }
- if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
- goto err;
} else {
dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
- return -EINVAL;
+ return;
}
+
adap->params.hash_filter = 1;
- return 0;
-err:
- dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
- return -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index 8db5fca6dcc9..b0751c0611ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -50,7 +50,7 @@ int delete_filter(struct adapter *adapter, unsigned int fidx);
int writable_filter(struct filter_entry *f);
void clear_all_filters(struct adapter *adapter);
-int init_hash_filter(struct adapter *adap);
+void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 715e4edcf4a2..7d7df59f9a70 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1646,6 +1646,18 @@ unsigned int cxgb4_port_chan(const struct net_device *dev)
}
EXPORT_SYMBOL(cxgb4_port_chan);
+/**
+ * cxgb4_port_e2cchan - get the HW c-channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW RX c-channel of the given port.
+ */
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->rx_cchan;
+}
+EXPORT_SYMBOL(cxgb4_port_e2cchan);
+
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
@@ -3905,14 +3917,14 @@ static int adap_init0_phy(struct adapter *adap)
*/
static int adap_init0_config(struct adapter *adapter, int reset)
{
+ char *fw_config_file, fw_config_file_path[256];
+ u32 finiver, finicsum, cfcsum, param, val;
struct fw_caps_config_cmd caps_cmd;
- const struct firmware *cf;
unsigned long mtype = 0, maddr = 0;
- u32 finiver, finicsum, cfcsum;
- int ret;
- int config_issued = 0;
- char *fw_config_file, fw_config_file_path[256];
+ const struct firmware *cf;
char *config_name = NULL;
+ int config_issued = 0;
+ int ret;
/*
* Reset device if necessary.
@@ -4020,6 +4032,24 @@ static int adap_init0_config(struct adapter *adapter, int reset)
goto bye;
}
+ val = 0;
+
+ /* Ofld + Hash filter is supported. Older fw will fail this request;
+ * that is fine.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &val);
+
+ /* If the FW doesn't know about Hash filter + ofld support, it's not
+ * a problem; don't return an error.
+ */
+ if (ret < 0) {
+ dev_warn(adapter->pdev_dev,
+ "Hash filter with ofld is not supported by FW\n");
+ }
+
/*
* Issue a Capability Configuration command to the firmware to get it
* to parse the Configuration File. We don't use t4_fw_config_file()
@@ -4580,6 +4610,13 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* The hash filter code must validate some mandatory register
+ * settings, and that validation depends on whether offload is
+ * enabled, so check and set it here.
+ */
+ if (caps_cmd.ofldcaps)
+ adap->params.offload = 1;
+
if (caps_cmd.ofldcaps ||
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
/* query offload-related parameters */
@@ -4619,11 +4656,8 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
- ret = init_hash_filter(adap);
- if (ret < 0)
- goto bye;
+ init_hash_filter(adap);
} else {
- adap->params.offload = 1;
adap->num_ofld_uld += 1;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 21da34a4ca24..42ae28d651e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -393,6 +393,7 @@ int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 93feb258067b..9dd5ed9a2965 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6209,6 +6209,37 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
}
/**
+ * t4_get_tp_e2c_map - return the E2C channel map associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ */
+static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
+{
+ unsigned int nports;
+ u32 param, val = 0;
+ int ret;
+
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+ if (pidx >= nports) {
+ CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ /* FW version >= 1.16.44.0 can determine E2C channel map using
+ * FW_PARAMS_PARAM_DEV_TPCHMAP API.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
+ ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
+ 0, 1, &param, &val);
+ if (!ret)
+ return (val >> (8 * pidx)) & 0xff;
+
+ return 0;
+}
+
+/**
* t4_get_tp_ch_map - return TP ingress channels associated with a port
* @adapter: the adapter
* @pidx: the port index
@@ -9368,8 +9399,9 @@ int t4_init_sge_params(struct adapter *adapter)
*/
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
- int chan;
- u32 v;
+ u32 param, val, v;
+ int chan, ret;
+
v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
adap->params.tp.tre = TIMERRESOLUTION_G(v);
@@ -9379,11 +9411,47 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
for (chan = 0; chan < NCHAN; chan++)
adap->params.tp.tx_modq[chan] = chan;
- /* Cache the adapter's Compressed Filter Mode and global Incress
+ /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
* Configuration.
*/
- t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A, sleep_ok);
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
+
+ /* Read current value */
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret == 0) {
+ dev_info(adap->pdev_dev,
+ "Current filter mode/mask 0x%x:0x%x\n",
+ FW_PARAMS_PARAM_FILTER_MODE_G(val),
+ FW_PARAMS_PARAM_FILTER_MASK_G(val));
+ adap->params.tp.vlan_pri_map =
+ FW_PARAMS_PARAM_FILTER_MODE_G(val);
+ adap->params.tp.filter_mask =
+ FW_PARAMS_PARAM_FILTER_MASK_G(val);
+ } else {
+ dev_info(adap->pdev_dev,
+ "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
+
+ /* In case of an older fw (which doesn't expose the
+ * FW_PARAM_DEV_FILTER_MODE_MASK api) combined with a newer driver
+ * (which uses the fw api), fall back to the older method of reading
+ * the filter mode from the indirect register.
+ */
+ t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, sleep_ok);
+
+ /* With the older-fw and newer-driver combination we might run into
+ * an issue when the user wants to use the hash filter region but the
+ * filter_mask is zero; validating the filter_mask is then hard. To
+ * avoid that, set the filter_mask equal to the filter mode, which
+ * behaves exactly like the older way of skipping the filter mask
+ * validation.
+ */
+ adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
+ }
+
t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A, sleep_ok);
@@ -9594,6 +9662,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->tx_chan = port;
pi->lport = port;
pi->rss_size = rss_size;
+ pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
/* If fw supports returning the VIN as part of FW_VI_CMD,
* save the returned values.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index eb222d40ddbf..a957a6e4d4c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1334,6 +1334,10 @@
#define TP_OUT_CONFIG_A 0x7d04
#define TP_GLOBAL_CONFIG_A 0x7d08
+#define ACTIVEFILTERCOUNTS_S 22
+#define ACTIVEFILTERCOUNTS_V(x) ((x) << ACTIVEFILTERCOUNTS_S)
+#define ACTIVEFILTERCOUNTS_F ACTIVEFILTERCOUNTS_V(1U)
+
#define TP_CMM_TCB_BASE_A 0x7d10
#define TP_CMM_MM_BASE_A 0x7d14
#define TP_CMM_TIMER_BASE_A 0x7d18
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index b2a618e72fcf..0be4ce520352 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1221,6 +1221,23 @@ enum fw_params_mnem {
/*
* device parameters
*/
+
+#define FW_PARAMS_PARAM_FILTER_MODE_S 16
+#define FW_PARAMS_PARAM_FILTER_MODE_M 0xffff
+#define FW_PARAMS_PARAM_FILTER_MODE_V(x) \
+ ((x) << FW_PARAMS_PARAM_FILTER_MODE_S)
+#define FW_PARAMS_PARAM_FILTER_MODE_G(x) \
+ (((x) >> FW_PARAMS_PARAM_FILTER_MODE_S) & \
+ FW_PARAMS_PARAM_FILTER_MODE_M)
+
+#define FW_PARAMS_PARAM_FILTER_MASK_S 0
+#define FW_PARAMS_PARAM_FILTER_MASK_M 0xffff
+#define FW_PARAMS_PARAM_FILTER_MASK_V(x) \
+ ((x) << FW_PARAMS_PARAM_FILTER_MASK_S)
+#define FW_PARAMS_PARAM_FILTER_MASK_G(x) \
+ (((x) >> FW_PARAMS_PARAM_FILTER_MASK_S) & \
+ FW_PARAMS_PARAM_FILTER_MASK_M)
+
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
@@ -1250,12 +1267,15 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
+ FW_PARAMS_PARAM_DEV_TPCHMAP = 0x1F,
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
+ FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
/*
@@ -1347,6 +1367,11 @@ enum fw_params_param_dev_diag {
FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02,
};
+enum fw_params_param_dev_filter {
+ FW_PARAM_DEV_FILTER_VNIC_MODE = 0x00,
+ FW_PARAM_DEV_FILTER_MODE_MASK = 0x01,
+};
+
enum fw_params_param_dev_fwcache {
FW_PARAM_DEV_FWCACHE_FLUSH = 0x00,
FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7d2390e3df77..753957ec72be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2479,14 +2479,9 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
- queue.flc.stash_control = 1;
- queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
- /* 01 01 00 - data, annotation, flow context */
- queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
- DPNI_QUEUE_OPT_FLC,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
dev_err(dev, "dpni_set_queue(RX) failed\n");
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 8429f5c1d810..ed0d010c7cf2 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -29,3 +29,13 @@ config FSL_ENETC_PTP_CLOCK
packets using the SO_TIMESTAMPING API.
If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_HW_TIMESTAMPING
+ bool "ENETC hardware timestamping support"
+ depends on FSL_ENETC || FSL_ENETC_VF
+ help
+ Enable hardware timestamping of Ethernet packets using the
+ SO_TIMESTAMPING API. Dynamic allocation of the RX BD ring is not
+ yet supported, and extended RX BDs are too expensive to use when
+ timestamping is off, so this option enables extended RX BDs in
+ order to support hardware timestamping.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 491475d87736..223709443ea4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -13,7 +13,8 @@
#define ENETC_MAX_SKB_FRAGS 13
#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int active_offloads);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -33,7 +34,7 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- count = enetc_map_tx_buffs(tx_ring, skb);
+ count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
if (unlikely(!count))
goto drop_packet_err;
@@ -105,7 +106,8 @@ static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
}
}
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
struct skb_frag_struct *frag;
@@ -137,7 +139,10 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+ do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
+ tx_swbd->do_tstamp = do_tstamp;
+ tx_swbd->check_wb = tx_swbd->do_tstamp;
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
@@ -299,24 +304,70 @@ static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
+static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
+ u64 *tstamp)
+{
+ u32 lo, hi, tstamp_lo;
+
+ lo = enetc_rd(hw, ENETC_SICTR0);
+ hi = enetc_rd(hw, ENETC_SICTR1);
+ tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
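+ /* If the low counter word has wrapped past the captured timestamp,
+ * the high word was incremented after the capture; compensate.
+ */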
+ if (lo <= tstamp_lo)
+ hi -= 1;
+ *tstamp = (u64)hi << 32 | tstamp_lo;
+}
+
+static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+}
+
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
+ bool do_tstamp;
+ u64 tstamp = 0;
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ do_tstamp = false;
+
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
bool is_eof = !!tx_swbd->skb;
+ if (unlikely(tx_swbd->check_wb)) {
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ union enetc_tx_bd *txbd;
+
+ txbd = ENETC_TXBD(*tx_ring, i);
+
+ if (txbd->flags & ENETC_TXBD_FLAGS_W &&
+ tx_swbd->do_tstamp) {
+ enetc_get_tx_tstamp(&priv->si->hw, txbd,
+ &tstamp);
+ do_tstamp = true;
+ }
+ }
+
if (likely(tx_swbd->dma))
enetc_unmap_tx_buff(tx_ring, tx_swbd);
if (is_eof) {
+ if (unlikely(do_tstamp)) {
+ enetc_tstamp_tx(tx_swbd->skb, tstamp);
+ do_tstamp = false;
+ }
napi_consume_skb(tx_swbd->skb, napi_budget);
tx_swbd->skb = NULL;
}
@@ -425,10 +476,38 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static void enetc_get_rx_tstamp(struct net_device *ndev,
+ union enetc_rx_bd *rxbd,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 lo, hi, tstamp_lo;
+ u64 tstamp;
+
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
+ lo = enetc_rd(hw, ENETC_SICTR0);
+ hi = enetc_rd(hw, ENETC_SICTR1);
+ tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
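+ /* Same wrap compensation as on the TX timestamp path. */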
+ if (lo <= tstamp_lo)
+ hi -= 1;
+
+ tstamp = (u64)hi << 32 | tstamp_lo;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
+ }
+}
+#endif
+
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
- /* TODO: add tstamp, hashing */
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+#endif
+ /* TODO: hashing */
if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
@@ -442,6 +521,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
+#endif
}
static void enetc_process_skb(struct enetc_bdr *rx_ring,
@@ -1074,6 +1157,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ rbmr |= ENETC_RBMR_BDS;
+#endif
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
@@ -1341,6 +1427,62 @@ int enetc_close(struct net_device *ndev)
return 0;
}
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_bdr *tx_ring;
+ u8 num_tc;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
+
+ if (!num_tc) {
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ }
+
+ return 0;
+ }
+
+ /* Check if we have enough BD rings available to accommodate all TCs */
+ if (num_tc > priv->num_tx_rings) {
+ netdev_err(ndev, "Max %d traffic classes supported\n",
+ priv->num_tx_rings);
+ return -EINVAL;
+ }
+
+ /* For the moment, we use only one BD ring per TC.
+ *
+ * Configure num_tc BD rings with increasing priorities.
+ */
+ for (i = 0; i < num_tc; i++) {
+ tx_ring = priv->tx_ring[i];
+ enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ }
+
+ /* Reset the number of netdev queues based on the TC count */
+ netif_set_real_num_tx_queues(ndev, num_tc);
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ /* Each TC is associated with one netdev queue */
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ return 0;
+}
+
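enetc_setup_tc() maps each traffic class to exactly one TX BD ring whose hardware priority equals the TC index. From user space this path would typically be exercised through the mqprio qdisc (for example, something along the lines of "tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1", device name and counts purely illustrative). Since every TC is registered with count 1 and offset i, the stack's TC-to-queue translation collapses to the identity; a minimal sketch of that lookup under this assumption:

	/* Sketch: after netdev_set_tc_queue(ndev, i, 1, i), the TX queue
	 * for a packet classified to traffic class tc is simply tc.
	 */
	static u16 tc_to_queue(const struct net_device *ndev, u8 tc)
	{
		return ndev->tc_to_txq[tc].offset;	/* equals tc here */
	}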
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1396,6 +1538,70 @@ int enetc_set_features(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ break;
+ default:
+ priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+
+ if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+
+ config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif
+
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ if (cmd == SIOCSHWTSTAMP)
+ return enetc_hwtstamp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return enetc_hwtstamp_get(ndev, rq);
+#endif
+ return -EINVAL;
+}
+
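enetc_hwtstamp_set()/enetc_hwtstamp_get() implement the standard hwtstamp_config handshake behind SIOCSHWTSTAMP/SIOCGHWTSTAMP. A minimal user-space sketch that would reach enetc_hwtstamp_set(); sock_fd is any open AF_INET socket and the interface name is illustrative:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Sketch: enable hardware TX timestamping on "eth0". */
	static int enable_tx_tstamp(int sock_fd)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_NONE,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
	}

Note that the driver may widen rx_filter to HWTSTAMP_FILTER_ALL and report that back, which is why the config is copied out to the caller again on success.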
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index b274135c5103..541b4e2073fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -21,7 +21,9 @@ struct enetc_tx_swbd {
struct sk_buff *skb;
dma_addr_t dma;
u16 len;
- u16 is_dma_page;
+ u8 is_dma_page:1;
+ u8 check_wb:1;
+ u8 do_tstamp:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -167,6 +169,12 @@ struct enetc_cls_rule {
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+/* TODO: more hardware offloads */
+enum enetc_active_offloads {
+ ENETC_F_RX_TSTAMP = BIT(0),
+ ENETC_F_TX_TSTAMP = BIT(1),
+};
+
struct enetc_ndev_priv {
struct net_device *ndev;
struct device *dev; /* dma-mapping device */
@@ -178,6 +186,7 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
+ int active_offloads;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -200,6 +209,9 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
+/* PTP driver exports */
+extern int enetc_phc_index;
+
/* SI common */
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
@@ -216,6 +228,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
netdev_features_t features);
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index b9519b6ad727..fcb52efec075 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -555,6 +555,35 @@ static void enetc_get_ringparam(struct net_device *ndev,
}
}
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ int *phc_idx;
+
+ phc_idx = symbol_get(enetc_phc_index);
+ if (phc_idx) {
+ info->phc_index = *phc_idx;
+ symbol_put(enetc_phc_index);
+ } else {
+ info->phc_index = -1;
+ }
+
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+#else
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+#endif
+ return 0;
+}
+
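symbol_get() pins the module that exports enetc_phc_index for the duration of the read, so get_ts_info works whether or not the optional PTP driver is loaded; the pointer is only valid until the matching symbol_put(). A compressed sketch of the pattern, with a hypothetical exported symbol:

	extern int exported_val;	/* hypothetical EXPORT_SYMBOL from another module */

	static int read_optional_symbol(void)
	{
		int val = -1;
		int *p = symbol_get(exported_val);

		if (p) {
			val = *p;	/* safe: the owning module is pinned */
			symbol_put(exported_val);
		}
		return val;
	}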
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
@@ -571,6 +600,7 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
@@ -586,6 +616,7 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.set_rxfh = enetc_set_rxfh,
.get_ringparam = enetc_get_ringparam,
.get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index df8eb8882d92..88276299f447 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -127,7 +127,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_TBSR_BUSY BIT(0)
#define ENETC_TBMR_VIH BIT(9)
#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
-#define ENETC_TBMR_PRIO_SET(val) val
+#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK)
#define ENETC_TBMR_EN BIT(31)
#define ENETC_TBSR 0x4
#define ENETC_TBBAR0 0x10
@@ -361,6 +361,12 @@ union enetc_tx_bd {
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
+ struct {
+ __le32 tstamp;
+ u8 reserved[10];
+ u8 status;
+ u8 flags;
+ } wb; /* writeback descriptor */
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
@@ -399,6 +405,9 @@ union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ u8 reserved1[16];
+#endif
} w;
struct {
__le16 inet_csum;
@@ -413,6 +422,10 @@ union enetc_rx_bd {
};
__le32 lstatus;
};
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ __le32 tstamp;
+ u8 reserved[12];
+#endif
} r;
};
@@ -531,3 +544,13 @@ static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
}
+
+static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+ int prio)
+{
+ u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR);
+
+ val &= ~ENETC_TBMR_PRIO_MASK;
+ val |= ENETC_TBMR_SET_PRIO(prio);
+ enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
+}
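enetc_set_bdr_prio() performs a read-modify-write of TBMR so that only the 3-bit priority field changes. The renamed ENETC_TBMR_SET_PRIO macro also fixes a latent problem: the old ENETC_TBMR_PRIO_SET passed the value through unmasked, so an out-of-range priority could have flipped unrelated TBMR bits. A worked example with an illustrative value:

	/* prio = 9 = 0b1001:
	 * old: ENETC_TBMR_PRIO_SET(9) -> 0b1001, bit 3 leaks into TBMR
	 * new: ENETC_TBMR_SET_PRIO(9) -> 0b1001 & GENMASK(2, 0) = 0b001
	 */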
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 78287c517095..258b3cb38a6f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -702,6 +702,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
+ .ndo_do_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_setup_tc,
};
static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 8c1497e7d9c5..2fd2586e42bf 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,6 +7,9 @@
#include "enetc.h"
+int enetc_phc_index = -1;
+EXPORT_SYMBOL(enetc_phc_index);
+
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -96,6 +99,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
+ enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -119,6 +123,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+ enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 72c3ea887bcf..ebd21bf4cfa1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -111,6 +111,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
+ .ndo_do_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38f10f7dcbc3..2ee72452ca76 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
*/
if (!is_valid_ether_addr(iap)) {
/* Report it and use a random ethernet address instead */
- netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
eth_hw_addr_random(ndev);
- netdev_info(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
return;
}
@@ -3473,7 +3473,6 @@ fec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to enable phy regulator: %d\n", ret);
- clk_disable_unprepare(fep->clk_ipg);
goto failed_regulator;
}
} else {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7e892b1cbd3d..19e2365be7d8 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -617,7 +617,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
- pr_err("ptp_clock_register failed\n");
+ dev_err(&pdev->dev, "ptp_clock_register failed\n");
}
schedule_delayed_work(&fep->time_keep, HZ);
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index f54da3c684d0..e1bdfed16134 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -144,7 +144,8 @@
/* Hash Key extraction fields: */
#define DEFAULT_HASH_KEY_EXTRACT_FIELDS \
(KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \
- KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST)
+ KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST | \
+ KG_SCH_KN_IPSEC_SPI)
/* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP)
* don't exist in the frame
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index fa8b8506b120..738e01393b68 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -251,6 +251,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_algo->node);
@@ -351,6 +352,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index ad21b0ef1946..2e478d9dc4c6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -154,7 +154,6 @@ enum hnae3_reset_type {
HNAE3_VF_FULL_RESET,
HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
- HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
@@ -339,10 +338,14 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * restore_vlan_table()
+ * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
* Enable/disable HW GRO
+ * add_arfs_entry()
+ * Check the 5-tuple of the flow and create a flow director rule
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -492,6 +495,8 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
@@ -502,6 +507,7 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
+ void (*restore_vlan_table)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fc4917ac44be..30354fa33a36 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -252,6 +252,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 196a3d780dcf..0501b7804c07 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4,6 +4,9 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -79,23 +82,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}
-/* This callback function is used to set affinity changes to the irq affinity
- * masks when the irq_set_affinity_notifier function is used.
- */
-static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hns3_enet_tqp_vector *tqp_vectors =
- container_of(notify, struct hns3_enet_tqp_vector,
- affinity_notify);
-
- tqp_vectors->affinity_mask = *mask;
-}
-
-static void hns3_nic_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -107,8 +93,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;
- /* clear the affinity notifier and affinity mask */
- irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+ /* clear the affinity mask */
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
/* release the irq resource */
@@ -161,12 +146,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
- tqp_vectors->affinity_notify.notify =
- hns3_nic_irq_affinity_notify;
- tqp_vectors->affinity_notify.release =
- hns3_nic_irq_affinity_release;
- irq_set_affinity_notifier(tqp_vectors->vector_irq,
- &tqp_vectors->affinity_notify);
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -340,6 +319,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -351,11 +364,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
- return ret;
+ goto free_rmap;
}
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
@@ -384,7 +402,8 @@ out_start_err:
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_nic_uninit_irq(priv);
-
+free_rmap:
+ hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -467,6 +486,8 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
+ hns3_free_rx_cpu_rmap(netdev);
+
/* free irq resources */
hns3_nic_uninit_irq(priv);
@@ -1527,15 +1548,11 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
- if (!ret)
- set_bit(vid, priv->active_vlans);
-
return ret;
}
@@ -1543,33 +1560,11 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
- if (!ret)
- clear_bit(vid, priv->active_vlans);
-
- return ret;
-}
-
-static int hns3_restore_vlan(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret = 0;
- u16 vid;
-
- for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
- ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
- if (ret) {
- netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
- vid, ret);
- return ret;
- }
- }
-
return ret;
}
@@ -1722,6 +1717,32 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
h->ae_algo->ops->reset_event(h->pdev, h);
}
+#ifdef CONFIG_RFS_ACCEL
+static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct hnae3_handle *h = hns3_get_handle(dev);
+ struct flow_keys fkeys;
+
+ if (!h->ae_algo->ops->add_arfs_entry)
+ return -EOPNOTSUPP;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
+ return -EPROTONOSUPPORT;
+
+ if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
+ fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
+ (fkeys.basic.ip_proto != IPPROTO_TCP &&
+ fkeys.basic.ip_proto != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
+
+ return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
+}
+#endif
+
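hns3_rx_flow_steer() follows the aRFS ndo_rx_flow_steer contract: a non-negative return value is a driver-chosen filter id (here the TCAM location picked by add_arfs_entry, see hclge_add_fd_entry_by_arfs() further down), which the core records and later hands back when probing for expiry; a negative errno rejects the flow. A paraphrase of the round trip, not the core's actual code:

	/* Sketch of the aRFS round trip (core-side logic paraphrased):
	 *
	 *	id = ndo_rx_flow_steer(dev, skb, rxq, flow_id);
	 *	...
	 *	if (rps_may_expire_flow(dev, queue, flow_id, id))
	 *		the driver may reclaim the hardware filter
	 */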
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1737,6 +1758,10 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = hns3_rx_flow_steer,
+#endif
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -1895,9 +1920,9 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (!ae_dev) {
+ if (!ae_dev || !ae_dev->ops) {
dev_err(&pdev->dev,
- "Can't recover - error happened during device init\n");
+ "Can't recover - error happened before device initialized\n");
return PCI_ERS_RESULT_NONE;
}
@@ -1916,6 +1941,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
dev_info(dev, "requesting reset due to PCI error\n");
+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+
/* request the reset */
if (ae_dev->ops->reset_event) {
if (!ae_dev->override_pci_need_reset)
@@ -2828,6 +2856,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return ret;
}
+ skb_record_rx_queue(skb, ring->tqp->tqp_index);
*out_skb = skb;
return 0;
@@ -3331,8 +3360,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_notifier(tqp_vector->vector_irq,
- NULL);
irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
free_irq(tqp_vector->vector_irq, tqp_vector);
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3851,6 +3878,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_client_stop(handle);
+ hns3_uninit_phy(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3860,8 +3889,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_force_clear_all_rx_ring(handle);
- hns3_uninit_phy(netdev);
-
hns3_nic_uninit_vector_data(priv);
ret = hns3_nic_dealloc_vector_data(priv);
@@ -4251,12 +4278,8 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
- /* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF)) {
- ret = hns3_restore_vlan(netdev);
- if (ret)
- return ret;
- }
+ if (handle->ae_algo->ops->restore_vlan_table)
+ handle->ae_algo->ops->restore_vlan_table(handle);
return hns3_restore_fd_rules(netdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index c14480f9b625..efab15fc748a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -145,7 +145,7 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
-#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
+#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -550,7 +550,6 @@ struct hns3_nic_priv {
struct notifier_block notifier_block;
/* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index fbd904e3077c..7a3bde724151 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -110,8 +110,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -120,8 +119,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
@@ -175,7 +173,11 @@ static bool hclge_is_special_opcode(u16 opcode)
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG};
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d79a209b80f6..7a14d806744c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -243,6 +243,9 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+ /* M7 stats command */
+ HCLGE_OPC_M7_STATS_BD = 0x7012,
+ HCLGE_OPC_M7_STATS_INFO = 0x7013,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -674,7 +677,6 @@ struct hclge_umv_spc_alc_cmd {
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
-#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
@@ -970,6 +972,11 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
+struct hclge_get_m7_bd_cmd {
+ __le32 bd_num;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index a9ffb57c4607..e1007d96925b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -921,6 +921,61 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.reset_cnt);
}
+static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
+{
+ struct hclge_desc *desc_src, *desc_tmp;
+ struct hclge_get_m7_bd_cmd *req;
+ struct hclge_desc desc;
+ u32 bd_num, buf_len;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+
+ req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics bd number failed, ret=%d\n",
+ ret);
+ return;
+ }
+
+ bd_num = le32_to_cpu(req->bd_num);
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev,
+ "allocate desc for get_m7_stats failed\n");
+ return;
+ }
+
+ desc_tmp = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
+ HCLGE_OPC_M7_STATS_INFO);
+ if (ret) {
+ kfree(desc_src);
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics failed, ret=%d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < bd_num; i++) {
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[0]),
+ le32_to_cpu(desc_tmp->data[1]),
+ le32_to_cpu(desc_tmp->data[2]));
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[3]),
+ le32_to_cpu(desc_tmp->data[4]),
+ le32_to_cpu(desc_tmp->data[5]));
+
+ desc_tmp++;
+ }
+
+ kfree(desc_src);
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
@@ -998,7 +1053,7 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
(unsigned long)stats.time, rem_nsec / 1000,
stats.status);
}
@@ -1029,6 +1084,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
+ hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
hclge_dbg_dump_ncl_config(hdev,
&cmd_buf[sizeof("dump ncl_config")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 4ac80634c984..784512d5f395 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -87,25 +87,25 @@ static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -413,13 +413,13 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -1098,8 +1098,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* query all main PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
@@ -1262,8 +1260,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -1293,8 +1289,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* query all PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
@@ -1348,8 +1342,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -1501,7 +1493,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
return reset_type;
}
-static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
@@ -1574,10 +1566,9 @@ static const struct hclge_hw_blk hw_blk[] = {
{ /* sentinel */ }
};
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
- struct device *dev = &hdev->pdev->dev;
int ret = 0;
while (module->name) {
@@ -1589,10 +1580,6 @@ int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
module++;
}
- ret = hclge_config_rocee_ras_interrupt(hdev, state);
- if (ret)
- dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
-
return ret;
}
@@ -1667,8 +1654,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
/* query all main PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
@@ -1700,8 +1685,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
/* clear all main PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
@@ -1713,8 +1696,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
memset(desc, 0, bd_num * sizeof(struct hclge_desc));
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n",
@@ -1753,8 +1734,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
@@ -1783,7 +1762,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
- set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 9645590c9294..81d115ac13db 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -47,9 +47,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
-#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -119,7 +119,8 @@ struct hclge_hw_error {
};
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b1f8cb1155..cda1b3d096cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static struct hnae3_ae_algo ae_algo;
@@ -290,7 +292,7 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
- .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
.i_port_bitmap = 0x1,
@@ -1226,8 +1228,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
- if (hnae3_dev_fd_supported(hdev))
+ if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ }
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -2423,7 +2427,8 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
- if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
schedule_work(&hdev->rst_service_task);
}
@@ -2508,6 +2513,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
static void hclge_update_port_capability(struct hclge_mac *mac)
{
+ /* update fec ability by speed */
+ hclge_convert_setting_fec(mac);
+
/* firmware can not identify back plane type, the media type
* read from configuration can help deal it
*/
@@ -2580,6 +2588,10 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->speed_ability = le32_to_cpu(resp->speed_ability);
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
+ if (!resp->active_fec)
+ mac->fec_mode = 0;
+ else
+ mac->fec_mode = BIT(resp->active_fec);
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -2645,6 +2657,7 @@ static void hclge_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
hclge_task_schedule(hdev);
}
@@ -2693,15 +2706,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
- dev_info(&hdev->pdev->dev, "core reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- hdev->rst_stats.core_rst_cnt++;
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
@@ -2861,6 +2865,10 @@ int hclge_notify_client(struct hclge_dev *hdev,
struct hnae3_client *client = hdev->nic_client;
u16 i;
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -2886,7 +2894,8 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
int ret = 0;
u16 i;
- if (!client)
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
+ !client)
return 0;
if (!client->ops->reset_notify)
@@ -2923,10 +2932,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
break;
- case HNAE3_CORE_RESET:
- reg = HCLGE_GLOBAL_RESET_REG;
- reg_bit = HCLGE_CORE_RESET_BIT;
- break;
case HNAE3_FUNC_RESET:
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
@@ -3058,12 +3063,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
- case HNAE3_CORE_RESET:
- val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
- hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Core Reset requested\n");
- break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
/* schedule again to check later */
@@ -3110,16 +3109,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
rst_level = HNAE3_IMP_RESET;
clear_bit(HNAE3_IMP_RESET, addr);
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
- } else if (test_bit(HNAE3_CORE_RESET, addr)) {
- rst_level = HNAE3_CORE_RESET;
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
@@ -3147,9 +3140,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
case HNAE3_GLOBAL_RESET:
clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
break;
- case HNAE3_CORE_RESET:
- clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- break;
default:
break;
}
@@ -3180,6 +3170,8 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
+#define HCLGE_RESET_SYNC_TIME 100
+
u32 reg_val;
int ret = 0;
@@ -3188,7 +3180,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3208,7 +3200,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
@@ -3222,6 +3214,10 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
break;
}
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGE_RESET_SYNC_TIME);
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
+ HCLGE_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
@@ -3521,6 +3517,10 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
+ if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ hclge_rfs_filter_expire(hdev);
+ hdev->fd_arfs_expire_timer = 0;
+ }
hclge_service_complete(hdev);
}
@@ -4906,14 +4906,18 @@ static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
struct hclge_fd_rule *rule = NULL;
struct hlist_node *node2;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= location)
break;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return rule && rule->location == location;
}
+/* make sure the caller holds fd_rule_lock when calling this function */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
struct hclge_fd_rule *new_rule,
u16 location,
@@ -4937,9 +4941,13 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
kfree(rule);
hdev->hclge_fd_rule_num--;
- if (!is_add)
- return 0;
+ if (!is_add) {
+ if (!hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ clear_bit(location, hdev->fd_bmap);
+ return 0;
+ }
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
"delete fail, rule %d is inexistent\n",
@@ -4954,7 +4962,9 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
else
hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+ set_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num++;
+ hdev->fd_active_type = new_rule->rule_type;
return 0;
}
@@ -5112,6 +5122,36 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0;
}
+/* make sure the caller holds fd_rule_lock when calling this function */
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ if (!rule) {
+ dev_err(&hdev->pdev->dev,
+ "The flow director rule is NULL\n");
+ return -EINVAL;
+ }
+
+ /* updating the rule list cannot fail here, so the return value needs no check */
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ return 0;
+
+clear_rule:
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+ return ret;
+}
+
static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5174,8 +5214,10 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -ENOMEM;
ret = hclge_fd_get_tuple(hdev, fs, rule);
- if (ret)
- goto free_rule;
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
rule->flow_type = fs->flow_type;
@@ -5184,23 +5226,18 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->vf_id = dst_vport_id;
rule->queue_id = q_index;
rule->action = action;
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
-
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
+ /* when the user configures rules via ethtool, clear all
+ * aRFS rules first to avoid conflicts
+ */
+ hclge_clear_arfs_rules(handle);
- ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
- if (ret)
- goto free_rule;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_config_rule(hdev, rule);
- return ret;
+ spin_unlock_bh(&hdev->fd_rule_lock);
-free_rule:
- kfree(rule);
return ret;
}
@@ -5232,8 +5269,12 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (ret)
return ret;
- return hclge_fd_update_rule_list(hdev, NULL, fs->location,
- false);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
@@ -5243,25 +5284,30 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
+ u16 location;
if (!hnae3_dev_fd_supported(hdev))
return;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+ NULL, false);
+
if (clear_list) {
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
rule_node) {
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
hlist_del(&rule->rule_node);
kfree(rule);
- hdev->hclge_fd_rule_num--;
}
- } else {
- hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
- rule_node)
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ hdev->hclge_fd_rule_num = 0;
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -5283,6 +5329,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (!hdev->fd_en)
return 0;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
if (!ret)
@@ -5292,11 +5339,18 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
dev_warn(&hdev->pdev->dev,
"Restore rule %d failed, remove it\n",
rule->location);
+ clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
kfree(rule);
hdev->hclge_fd_rule_num--;
}
}
+
+ if (hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5329,13 +5383,18 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ spin_lock_bh(&hdev->fd_rule_lock);
+
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= fs->location)
break;
}
- if (!rule || fs->location != rule->location)
+ if (!rule || fs->location != rule->location) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return -ENOENT;
+ }
fs->flow_type = rule->flow_type;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
@@ -5474,6 +5533,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
default:
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
@@ -5505,6 +5565,8 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs->ring_cookie |= vf_id;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5522,20 +5584,208 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2,
&hdev->fd_rule_list, rule_node) {
- if (cnt == cmd->rule_cnt)
+ if (cnt == cmd->rule_cnt) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EMSGSIZE;
+ }
rule_locs[cnt] = rule->location;
cnt++;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
cmd->rule_cnt = cnt;
return 0;
}
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+{
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+ memcpy(tuples->src_ip,
+ fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
+ sizeof(tuples->src_ip));
+ memcpy(tuples->dst_ip,
+ fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
+ sizeof(tuples->dst_ip));
+ }
+}
+
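hclge_fd_rule_tuples stores addresses as four 32-bit words sized for IPv6, so an IPv4 address occupies only the last word (index 3) while words 0..2 stay zero; both address families then share one key layout. A small sketch of that convention (types and names assumed, not the driver's structs):

	struct tuple_addrs {
		u32 src_ip[4];	/* IPv4: [0..2] zero, [3] holds the address */
		u32 dst_ip[4];
	};

	static void fill_v4(struct tuple_addrs *t, u32 saddr, u32 daddr)
	{
		memset(t, 0, sizeof(*t));
		t->src_ip[3] = saddr;	/* host order, per be32_to_cpu() above */
		t->dst_ip[3] = daddr;
	}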
+/* traverse all rules and check whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+ const struct hclge_fd_rule_tuples *tuples)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+ return rule;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+ struct hclge_fd_rule *rule)
+{
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_PORT);
+ rule->action = 0;
+ rule->vf_id = 0;
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ if (tuples->ether_proto == ETH_P_IP) {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V4_FLOW;
+ else
+ rule->flow_type = UDP_V4_FLOW;
+ } else {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V6_FLOW;
+ else
+ rule->flow_type = UDP_V6_FLOW;
+ }
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
+
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 tmp_queue_id;
+ u16 bit_id;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ memset(&new_tuples, 0, sizeof(new_tuples));
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ /* when a flow director rule added by the user already exists,
+ * aRFS must not take effect
+ */
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -EOPNOTSUPP;
+ }
+
+ /* check whether a flow director filter already exists for this flow:
+ * if not, create a new filter for it;
+ * if a filter exists with a different queue id, modify the filter;
+ * if a filter exists with the same queue id, do nothing
+ */
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+ if (!rule) {
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOSPC;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOMEM;
+ }
+
+ set_bit(bit_id, hdev->fd_bmap);
+ rule->location = bit_id;
+ rule->flow_id = flow_id;
+ rule->queue_id = queue_id;
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (ret)
+ return ret;
+
+ return rule->location;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (rule->queue_id == queue_id)
+ return rule->location;
+
+ tmp_queue_id = rule->queue_id;
+ rule->queue_id = queue_id;
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret) {
+ rule->queue_id = tmp_queue_id;
+ return ret;
+ }
+
+ return rule->location;
+}
+
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ HLIST_HEAD(del_list);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return;
+ }
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+ rule->flow_id, rule->location)) {
+ hlist_del_init(&rule->rule_node);
+ hlist_add_head(&rule->rule_node, &del_list);
+ hdev->hclge_fd_rule_num--;
+ clear_bit(rule->location, hdev->fd_bmap);
+ }
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ kfree(rule);
+ }
+#endif
+}
+
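hclge_rfs_filter_expire() uses a common two-phase teardown: expired rules are unlinked and collected on a private list while fd_rule_lock is held, and the slower TCAM writes and kfree() run only after the lock is dropped. A minimal sketch of the pattern with illustrative types:

	struct rule {
		struct hlist_node node;
		/* driver-specific state */
	};

	/* Sketch: unlink under the lock, do the slow work outside it. */
	static void expire_rules(spinlock_t *lock, struct hlist_head *live,
				 bool (*expired)(struct rule *))
	{
		struct rule *r;
		struct hlist_node *n;
		HLIST_HEAD(del_list);

		spin_lock_bh(lock);
		hlist_for_each_entry_safe(r, n, live, node) {
			if (expired(r)) {
				hlist_del_init(&r->node);
				hlist_add_head(&r->node, &del_list);
			}
		}
		spin_unlock_bh(lock);

		hlist_for_each_entry_safe(r, n, &del_list, node)
			kfree(r);	/* plus any slow hardware cleanup */
	}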
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
+ hclge_del_all_fd_entries(handle, true);
+#endif
+}
+
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5565,10 +5815,12 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ bool clear;
hdev->fd_en = enable;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
if (!enable)
- hclge_del_all_fd_entries(handle, false);
+ hclge_del_all_fd_entries(handle, clear);
else
hclge_restore_fd_entries(handle);
}
@@ -5838,6 +6090,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ hclge_clear_arfs_rules(handle);
+
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
@@ -6771,6 +7025,12 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
u8 vf_byte_off;
int ret;
+ /* if the vf vlan table is full, the firmware disables the vf vlan
+ * filter, so it is useless and unnecessary to add new vlan ids
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ return 0;
+
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -6806,6 +7066,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return 0;
if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
return 0;
@@ -7140,10 +7401,6 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
{
struct hclge_vport_vlan_cfg *vlan;
- /* vlan 0 is reserved */
- if (!vlan_id)
- return;
-
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return;
@@ -7238,6 +7495,43 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
mutex_unlock(&hdev->vport_cfg_mutex);
}
+static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ u16 vlan_proto, qos;
+ u16 state, vlan_id;
+ int i;
+
+ mutex_lock(&hdev->vport_cfg_mutex);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id, qos,
+ false);
+ continue;
+ }
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0,
+ false);
+ }
+ }
+
+ mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -7894,6 +8188,58 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info end.\n");
}
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->nic.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ ret = client->ops->init_instance(&vport->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ /* Enable nic hw error interrupts */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable hw error interrupts\n", ret);
+
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
+ return ret;
+}
+
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+	struct hnae3_client *client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ client = hdev->roce_client;
+ ret = hclge_init_roce_base_info(vport);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&vport->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -7909,30 +8255,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hdev->nic_client = client;
vport->nic.client = client;
- ret = client->ops->init_instance(&vport->nic);
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
-
- if (netif_msg_drv(&hdev->vport->nic))
- hclge_info_show(hdev);
-
- if (hdev->roce_client &&
- hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
-
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = rc->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(hdev->roce_client,
- ae_dev, 1);
- }
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
break;
case HNAE3_CLIENT_UNIC:
@@ -7952,17 +8281,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(client, ae_dev, 1);
- }
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
break;
default:
@@ -7970,7 +8291,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return 0;
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+
+ return ret;
clear_nic:
hdev->nic_client = NULL;
@@ -7992,6 +8319,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport = &hdev->vport[i];
if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
hdev->roce_client = NULL;
@@ -8000,6 +8328,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -8081,6 +8410,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
static void hclge_state_uninit(struct hclge_dev *hdev)
{
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
@@ -8143,6 +8473,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_init(&hdev->vport_lock);
mutex_init(&hdev->vport_cfg_mutex);
+ spin_lock_init(&hdev->fd_rule_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -8270,13 +8601,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_hw_error_set_state(hdev, true);
- if (ret) {
- dev_err(&pdev->dev,
- "fail(%d) to enable hw error interrupts\n", ret);
- goto err_mdiobus_unreg;
- }
-
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -8342,6 +8666,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_stats_clear(hdev);
memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -8399,15 +8724,26 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
}
/* Re-enable the hw error interrupts because
- * the interrupts get disabled on core/global reset.
+ * the interrupts get disabled on global reset.
*/
- ret = hclge_hw_error_set_state(hdev, true);
+ ret = hclge_config_nic_hw_error(hdev, true);
if (ret) {
dev_err(&pdev->dev,
- "fail(%d) to re-enable HNS hw error interrupts\n", ret);
+ "fail(%d) to re-enable NIC hw error interrupts\n",
+ ret);
return ret;
}
+ if (hdev->roce_client) {
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable roce ras interrupts\n",
+ ret);
+ return ret;
+ }
+ }
+
hclge_reset_vport_state(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
@@ -8432,8 +8768,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ /* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
- hclge_hw_error_set_state(hdev, false);
+ hclge_config_nic_hw_error(hdev, false);
+ hclge_config_rocee_ras_interrupt(hdev, false);
+
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -8908,6 +9247,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
@@ -8918,6 +9258,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
+ .restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index dd06b11187b0..414f7db702d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -201,6 +201,8 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_DOWN,
HCLGE_STATE_DISABLED,
HCLGE_STATE_REMOVING,
+ HCLGE_STATE_NIC_REGISTERED,
+ HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
@@ -578,6 +580,16 @@ static const struct key_info tuple_key_info[] = {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
+/* assigned by firmware; the actual filter count for each PF may be smaller */
+#define MAX_FD_FILTER_NUM 4096
+#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+ HCLGE_FD_RULE_NONE,
+ HCLGE_FD_ARFS_ACTIVE,
+ HCLGE_FD_EP_ACTIVE,
+};
+
enum HCLGE_FD_PACKET_TYPE {
NIC_PACKET,
ROCE_PACKET,
@@ -630,6 +642,8 @@ struct hclge_fd_rule {
u16 vf_id;
u16 queue_id;
u16 location;
+ u16 flow_id; /* only used for arfs */
+ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};
struct hclge_fd_ad_data {
@@ -806,10 +820,15 @@ struct hclge_dev {
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
+ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
+ u16 fd_arfs_expire_timer;
+ unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+ enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
u16 wanted_umv_size;
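The new fd_rule_lock serializes all access to fd_rule_list and fd_bmap;
because aRFS enters from softirq context, the _bh lock variants are used and
any allocation made under the lock must be atomic. A minimal sketch of the
resulting pattern (illustrative only, not part of the patch):

static int hclge_fd_rule_insert_sketch(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC); /* lock held: no sleeping */
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOMEM;
	}

	rule->location = location;
	set_bit(location, hdev->fd_bmap);
	hlist_add_head(&rule->rule_node, &hdev->fd_rule_list);
	hdev->hclge_fd_rule_num++;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}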
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0e04e63f2a94..d20f01720719 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -192,12 +192,10 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
- if (ret)
- return ret;
hclge_free_vector_ring_chain(&ring_chain);
- return 0;
+ return ret;
}
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index a7bbb6d3091a..fac51938ef8e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -397,7 +397,7 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
u8 ir_u, ir_b, ir_s;
int ret;
- ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
HCLGE_SHAPER_LVL_PORT,
&ir_b, &ir_u, &ir_s);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 71f356fc2446..e1588c0e8bb9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -98,7 +98,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -110,7 +109,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5d53467ee2d2..87a619db2780 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1306,6 +1306,10 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
struct hnae3_handle *handle = &hdev->nic;
int ret;
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -1410,6 +1414,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
+#define HCLGEVF_RESET_SYNC_TIME 100
+
int ret = 0;
switch (hdev->reset_type) {
@@ -1427,7 +1433,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGEVF_RESET_SYNC_TIME);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
@@ -1612,7 +1621,8 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -2123,6 +2133,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
if (hdev->keep_alive_timer.function)
del_timer_sync(&hdev->keep_alive_timer);
@@ -2249,6 +2260,48 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info end.\n");
}
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
+ return 0;
+}
+
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -2260,28 +2313,15 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->nic_client = client;
hdev->nic.client = client;
- ret = client->ops->init_instance(&hdev->nic);
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
-
- if (netif_msg_drv(&hdev->nic))
- hclgevf_info_show(hdev);
-
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
-
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
- ret = rc->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
+ ret = hclgevf_init_roce_client_instance(ae_dev,
+ hdev->roce_client);
+ if (ret)
+ goto clear_roce;
- hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
- 1);
- }
break;
case HNAE3_CLIENT_UNIC:
hdev->nic_client = client;
@@ -2299,17 +2339,10 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
- }
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_roce;
- hnae3_set_client_init_flag(client, ae_dev, 1);
break;
default:
return -EINVAL;
@@ -2342,6 +2375,8 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init nic/unic, if this was not called by roce client */
if (client->ops->uninit_instance && hdev->nic_client &&
client->type != HNAE3_CLIENT_ROCE) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
client->ops->uninit_instance(&hdev->nic, 0);
hdev->nic_client = NULL;
hdev->nic.client = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cc52f54f8c08..a7fbd38c1492 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -130,6 +130,8 @@ enum hclgevf_states {
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
HCLGEVF_STATE_IRQ_INITED,
+ HCLGEVF_STATE_REMOVING,
+ HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 3875f39f43bb..756a7e3280bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -313,6 +313,8 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
hw_ioctxt.cmdq_depth = 0;
+ hw_ioctxt.lro_en = 1;
+
hw_ioctxt.rq_depth = ilog2(rq_depth);
hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index c9e621e19dd0..fba4fe82472a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -50,6 +50,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_LRO = 25,
+
HINIC_PORT_CMD_SET_RX_CSUM = 26,
HINIC_PORT_CMD_SET_PORT_STATE = 41,
@@ -62,7 +64,11 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_SET_TSO = 112,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+
HINIC_PORT_CMD_GET_CAP = 170,
+
+ HINIC_PORT_CMD_SET_LRO_TIMER = 244,
};
enum hinic_mgmt_msg_cmd {
@@ -106,7 +112,7 @@ struct hinic_cmd_hw_ioctxt {
u8 set_cmdq_depth;
u8 cmdq_depth;
- u8 rsvd2;
+ u8 lro_en;
u8 rsvd3;
u8 rsvd4;
u8 rsvd5;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a322a22d9357..c1127478881e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -45,6 +45,7 @@
enum io_cmd {
IO_CMD_MODIFY_QUEUE_CTXT = 0,
+ IO_CMD_CLEAN_QUEUE_CTXT,
};
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
@@ -210,6 +211,59 @@ static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
write_rq_ctxts(func_to_io, base_qpn, num_qps));
}
+static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
+ enum hinic_qp_ctxt_type ctxt_type)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct hinic_clean_queue_ctxt *ctxt_block;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_buf cmdq_buf;
+ u64 out_param = 0;
+ int err;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ ctxt_block = cmdq_buf.buf;
+ ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
+ ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+ ctxt_block->cmdq_hdr.addr_offset = 0;
+
+ /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+ ctxt_block->ctxt_size = 0x3;
+
+ hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+ cmdq_buf.size = sizeof(*ctxt_block);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ IO_CMD_CLEAN_QUEUE_CTXT,
+ &cmdq_buf, &out_param);
+
+ if (err || out_param) {
+ dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+
+ return err;
+}
+
+static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
+{
+ /* clean LRO/TSO context space */
+ return (hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_SQ) ||
+ hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_RQ));
+}
+
/**
* init_qp - Initialize a Queue Pair
* @func_to_io: func to io channel that holds the IO components
@@ -381,6 +435,12 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
goto err_write_qp_ctxts;
}
+ err = hinic_clean_qp_offload_ctxt(func_to_io);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
+ goto err_write_qp_ctxts;
+ }
+
return 0;
err_write_qp_ctxts:
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
index 376abf00762b..01c41dd705cb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
@@ -201,6 +201,11 @@ struct hinic_rq_ctxt {
u32 wq_block_lo_pfn;
};
+struct hinic_clean_queue_ctxt {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ u32 ctxt_size;
+};
+
struct hinic_sq_ctxt_block {
struct hinic_qp_ctxt_header hdr;
struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 138941527872..ef852b7b57a3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -219,6 +219,26 @@
#define HINIC_MSS_DEFAULT 0x3E00
#define HINIC_MSS_MIN 0x50
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+
+#define RQ_CQE_STATUS_GET(val, member) (((val) >> \
+ RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_SHIFT		0
+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_MASK		0xFFFU
+
+#define RQ_CQE_OFFLOAD_TYPE_GET(val, member)	(((val) >> \
+				RQ_CQE_OFFLOAD_TYPE_##member##_SHIFT) & \
+				RQ_CQE_OFFLOAD_TYPE_##member##_MASK)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+		RQ_CQE_OFFLOAD_TYPE_GET(offload_type, PKT_TYPE)
+
enum hinic_l4offload_type {
HINIC_L4_OFF_DISABLE = 0,
HINIC_TCP_OFFLOAD_ENABLE = 1,
@@ -372,7 +392,7 @@ struct hinic_rq_cqe {
u32 status;
u32 len;
- u32 rsvd2;
+ u32 offload_type;
u32 rsvd3;
u32 rsvd4;
u32 rsvd5;
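A hypothetical decode helper (not in the patch) showing how the new CQE
accessors compose; both CQE words are big-endian on the wire, matching the
be32_to_cpu() conversions in the rx path:

static void hinic_cqe_decode_sketch(const struct hinic_rq_cqe *cqe)
{
	u32 status = be32_to_cpu(cqe->status);
	u32 offload = be32_to_cpu(cqe->offload_type);
	u16 num_lro = HINIC_GET_RX_NUM_LRO(status);	/* coalesced frames */
	u32 pkt_type = HINIC_GET_RX_PKT_TYPE(offload);	/* 7 means IPv6 */

	pr_debug("num_lro=%u pkt_type=%u\n", num_lro, pkt_type);
}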
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index e64bc664f687..419880564ee5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,6 +62,10 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
NETIF_MSG_IFUP | \
NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+
+#define HINIC_LRO_RX_TIMER_DEFAULT 16
+
#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
#define work_to_rx_mode_work(work) \
@@ -72,6 +76,10 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
static int change_mac_addr(struct net_device *netdev, const u8 *addr);
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change);
+
static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
enum hinic_speed speed)
{
@@ -372,6 +380,17 @@ static void free_rxqs(struct hinic_dev *nic_dev)
nic_dev->rxqs = NULL;
}
+static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
+{
+	return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
+}
+
static int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -401,6 +420,13 @@ static int hinic_open(struct net_device *netdev)
goto err_create_rxqs;
}
+ err = hinic_configure_max_qnum(nic_dev);
+ if (err) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to configure the maximum number of queues\n");
+ goto err_port_state;
+ }
+
num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
netif_set_real_num_tx_queues(netdev, num_qps);
netif_set_real_num_rx_queues(netdev, num_qps);
@@ -724,7 +750,6 @@ static void set_rx_mode(struct work_struct *work)
{
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- struct netdev_hw_addr *ha;
netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
@@ -732,9 +757,6 @@ static void set_rx_mode(struct work_struct *work)
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
-
- netdev_for_each_mc_addr(ha, nic_dev->netdev)
- add_mac_addr(nic_dev->netdev, ha->addr);
}
static void hinic_set_rx_mode(struct net_device *netdev)
@@ -791,6 +813,29 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->tx_errors = nic_tx_stats->tx_dropped;
}
+static int hinic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return set_features(nic_dev, nic_dev->netdev->features,
+ features, false);
+}
+
+static netdev_features_t hinic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM)) {
+ netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -803,13 +848,16 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+	.ndo_set_features = hinic_set_features,
};
static void netdev_features_init(struct net_device *netdev)
{
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM | NETIF_F_LRO;
netdev->vlan_features = netdev->hw_features;
@@ -882,6 +930,13 @@ static int set_features(struct hinic_dev *nic_dev,
if (changed & NETIF_F_RXCSUM)
err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+ if (changed & NETIF_F_LRO) {
+ err = hinic_set_rx_lro_state(nic_dev,
+ !!(features & NETIF_F_LRO),
+ HINIC_LRO_RX_TIMER_DEFAULT,
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT);
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 122c93597268..c9aedecd19c9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -439,3 +439,117 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
return 0;
}
+
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_rq_num rq_num = { 0 };
+ u16 out_size = sizeof(rq_num);
+ int err;
+
+ rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rq_num.num_rqs = num_rqs;
+ rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
+ &rq_num, sizeof(rq_num),
+ &rq_num, &out_size);
+ if (err || !out_size || rq_num.status) {
+ dev_err(&pdev->dev,
+ "Failed to rxq number, ret = %d\n",
+ rq_num.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
+ u8 max_wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_lro_config lro_cfg = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro offload, ret = %d\n",
+ lro_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_lro_timer lro_timer = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_timer);
+ int err;
+
+ lro_timer.status = 0;
+ lro_timer.type = 0;
+ lro_timer.enable = 1;
+ lro_timer.timer = timer_value;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
+ &lro_timer, sizeof(lro_timer),
+ &lro_timer, &out_size);
+ if (lro_timer.status == 0xFF) {
+		/* the firmware does not support this command; treat
+		 * status 0xFF as success
+		 */
+		lro_timer.status = 0;
+		dev_dbg(&pdev->dev,
+			"Setting the lro timer is not supported by the current FW version, the 1ms default will be used\n");
+ }
+
+ if (err || !out_size || lro_timer.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro timer, ret = %d\n",
+ lro_timer.status);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u8 ipv4_en;
+ u8 ipv6_en;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num);
+ if (err)
+ return err;
+
+ err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
+ if (err)
+ return err;
+
+ return 0;
+}
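Enabling LRO is thus a two-step firmware conversation: program the
per-function LRO config, then the timer. A usage sketch with the defaults
from hinic_main.c (this mirrors the set_features() call path shown earlier):

	err = hinic_set_rx_lro_state(nic_dev, 1 /* lro_en */,
				     HINIC_LRO_RX_TIMER_DEFAULT,
				     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
	if (err)
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Failed to enable LRO\n");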
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 02d896eed455..972b7be460a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -192,6 +192,42 @@ struct hinic_checksum_offload {
u16 rsvd1;
u32 rx_csum_offload;
};
+
+struct hinic_rq_num {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1[33];
+ u32 num_rqs;
+ u32 rq_depth;
+};
+
+struct hinic_lro_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_lro_timer {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+	u8 type;   /* 0: set the timer value, 1: get the timer value */
+	u8 enable; /* must be 1 when setting the lro timer */
+ u16 rsvd1;
+ u32 timer;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -220,7 +256,12 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_port_cap *port_cap);
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs);
+
int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index b6d218768ec1..04c887d13848 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -45,6 +45,15 @@
#define RX_IRQ_NO_RESEND_TIMER 0
#define HINIC_RX_BUFFER_WRITE 16
+#define HINIC_RX_IPV6_PKT 7
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_REPLENISH_THLD 256
+
+#define LRO_PKT_HDR_LEN(cqe) \
+ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
+ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+
/**
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
@@ -90,18 +99,12 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
hinic_rxq_clean_stats(rxq);
}
-static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+static void rx_csum(struct hinic_rxq *rxq, u32 status,
struct sk_buff *skb)
{
struct net_device *netdev = rxq->netdev;
- struct hinic_rq_cqe *cqe;
- struct hinic_rq *rq;
u32 csum_err;
- u32 status;
- rq = rxq->rq;
- cqe = rq->cqe[cons_idx];
- status = be32_to_cpu(cqe->status);
csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
if (!(netdev->features & NETIF_F_RXCSUM))
@@ -321,12 +324,16 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
u64 pkt_len = 0, rx_bytes = 0;
+ struct hinic_rq *rq = rxq->rq;
struct hinic_rq_wqe *rq_wqe;
unsigned int free_wqebbs;
+ struct hinic_rq_cqe *cqe;
int num_wqes, pkts = 0;
struct hinic_sge sge;
+ unsigned int status;
struct sk_buff *skb;
- u16 ci;
+ u16 ci, num_lro;
+ u16 num_wqe = 0;
while (pkts < budget) {
num_wqes = 0;
@@ -336,11 +343,13 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
if (!rq_wqe)
break;
+ cqe = rq->cqe[ci];
+ status = be32_to_cpu(cqe->status);
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
- rx_csum(rxq, ci, skb);
+ rx_csum(rxq, status, skb);
prefetch(skb->data);
@@ -354,7 +363,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
HINIC_RX_BUF_SZ, ci);
}
- hinic_rq_put_wqe(rxq->rq, ci,
+ hinic_rq_put_wqe(rq, ci,
(num_wqes + 1) * HINIC_RQ_WQE_SIZE);
skb_record_rx_queue(skb, qp->q_id);
@@ -364,6 +373,21 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
pkts++;
rx_bytes += pkt_len;
+
+ num_lro = HINIC_GET_RX_NUM_LRO(status);
+ if (num_lro) {
+ rx_bytes += ((num_lro - 1) *
+ LRO_PKT_HDR_LEN(cqe));
+
+			num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) +
+				   ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ }
+
+ cqe->status = 0;
+
+ if (num_wqe >= LRO_REPLENISH_THLD)
+ break;
}
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
@@ -482,6 +506,8 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq->netdev = netdev;
rxq->rq = rq;
+ rxq->buf_len = HINIC_RX_BUF_SZ;
+ rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
rxq_stats_init(rxq);
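A worked example of the LRO byte accounting above, as a sketch assuming TCP
timestamps (66-byte IPv4/TCP headers) and 2 KB rx buffers (the actual
HINIC_RX_BUF_SZ value is not shown in this section): four 1514-byte wire
segments coalesce into one LRO frame.

	/*
	 * pkt_len  = 66 + 4 * 1448           = 5858 (headers 2..4 stripped)
	 * rx_bytes += pkt_len + (4 - 1) * 66 = 6056 = 4 * 1514, so the byte
	 *            counter matches what actually crossed the wire
	 * num_wqe  += (5858 >> 11) + 1       = 3 rx buffers consumed, which
	 *            feeds the LRO_REPLENISH_THLD early-exit check
	 */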
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index f8ed3fa6c8ee..08e7d88382cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,6 +41,8 @@ struct hinic_rxq {
struct hinic_rxq_stats rxq_stats;
char *irq_name;
+ u16 buf_len;
+ u32 rx_buff_shift;
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0e09bede42a2..b081a1ef6859 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
- netif_start_queue(adapter->netdev);
+ /* Tx queue started by watchdog timer when link is up */
e1000e_trigger_lsc(adapter);
}
@@ -4606,6 +4606,7 @@ int e1000e_open(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
/* allocate transmit descriptors */
err = e1000e_setup_tx_resources(adapter->tx_ring);
@@ -4666,7 +4667,6 @@ int e1000e_open(struct net_device *netdev)
e1000_irq_enable(adapter);
adapter->tx_hang_recheck = false;
- netif_start_queue(netdev);
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -5288,6 +5288,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (phy->ops.cfg_on_link_up)
phy->ops.cfg_on_link_up(hw);
+ netif_wake_queue(netdev);
netif_carrier_on(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5301,6 +5302,7 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Link status message must follow this format */
pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -5308,13 +5310,8 @@ static void e1000_watchdog_task(struct work_struct *work)
/* 8000ES2LAN requires a Rx packet buffer work-around
* on link down event; reset the controller to flush
* the Rx packet buffer.
- *
- * If the link is lost the controller stops DMA, but
- * if there is queued Tx work it cannot be done. So
- * reset the controller to flush the Tx packet buffers.
*/
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
- e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -5337,6 +5334,14 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
/* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7ce42040b851..8dc98d1d2e86 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -295,8 +295,6 @@ struct i40e_cloud_filter {
u8 tunnel_type;
};
-#define I40E_ETH_P_LLDP 0x88cc
-
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 7ea4f09229e4..dc5b40013e61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1330,7 +1330,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
- I40E_ETH_P_LLDP, 0,
+ ETH_P_LLDP, 0,
pf->vsi[pf->lan_vsi]->seid,
0, true, NULL, NULL);
if (ret) {
@@ -1348,7 +1348,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
- I40E_ETH_P_LLDP, 0,
+ ETH_P_LLDP, 0,
pf->vsi[pf->lan_vsi]->seid,
0, false, NULL, NULL);
if (ret) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 479bc60c8f71..09a7fd4d24e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3943,6 +3943,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int bkt;
u8 i;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -3967,11 +3972,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
- if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
- dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
- return -EAGAIN;
- }
-
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -4302,10 +4302,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &pf->vf[vf_id];
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
- ret = -EAGAIN;
+ if (!vsi) {
+ ret = -ENOENT;
goto error_param;
}
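Hoisting the test_and_set_bit() guard above all validation closes the window
where two concurrent ndo calls could both pass the checks; every later exit
goes through a path that clears the bit again. The generic shape of the
pattern, as a sketch (the do_set_vf_mac() helper is hypothetical):

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state))
		return -EAGAIN;

	ret = do_set_vf_mac(pf, vf_id, mac);	/* validation + programming */

	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;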
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 9cbb5743ed12..c997063ed728 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -12,4 +12,4 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
- iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o
+ iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
deleted file mode 100644
index e5ae4a1c0cff..000000000000
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ /dev/null
@@ -1,530 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_CMD_H_
-#define _I40E_ADMINQ_CMD_H_
-
-/* This header file defines the i40e Admin Queue commands and is shared between
- * i40e Firmware and Software. Do not change the names in this file to IAVF
- * because this file should be diff-able against the i40e version, even
- * though many parts have been removed in this VF version.
- *
- * This file needs to comply with the Linux Kernel coding style.
- */
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
-
-#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
- I40E_FW_API_VERSION_MINOR_X710 : \
- I40E_FW_API_VERSION_MINOR_X722)
-
-/* API version 1.7 implements additional link and PHY-specific APIs */
-#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
- /* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
-
- /* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
-
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
-
- /* Proxy commands */
- i40e_aqc_opc_set_proxy_config = 0x0104,
- i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-
- /* LAA */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
-
- /* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
-
- /* WoL commands */
- i40e_aqc_opc_set_wol_filter = 0x0120,
- i40e_aqc_opc_get_wake_reason = 0x0121,
-
- /* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
- i40e_aqc_opc_set_switch_config = 0x0205,
- i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
- i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
- i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
-
- /* Dynamic Device Personalization */
- i40e_aqc_opc_write_personalization_profile = 0x0270,
- i40e_aqc_opc_get_personalization_profile_list = 0x0271,
-
- /* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
- i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
- /* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
- /* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
- i40e_aqc_opc_run_phy_activity = 0x0626,
- i40e_aqc_opc_set_phy_register = 0x0628,
- i40e_aqc_opc_get_phy_register = 0x0629,
-
- /* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
- i40e_aqc_opc_oem_post_update = 0x0720,
- i40e_aqc_opc_thermal_sensor = 0x0721,
-
- /* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
-
- /* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
-
- /* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
-
- /* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_set_rss_key = 0x0B02,
- i40e_aqc_opc_set_rss_lut = 0x0B03,
- i40e_aqc_opc_get_rss_key = 0x0B04,
- i40e_aqc_opc_get_rss_lut = 0x0B05,
-
- /* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
-
- /* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
- i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
- i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
-
- /* debug commands */
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
- { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 272d76b733aa..657019418693 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -109,7 +109,7 @@ struct iavf_q_vector {
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
- * supported by all of the i40e hardware is 8.
+ * supported by all of the iavf hardware is 8.
*/
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
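/* A worked example of the conversion above (illustrative only): asking for
 * 20000 ints/sec gives 1000000000 / (20000 * 256) = 195 register units,
 * i.e. an interval of 195 * 256 ns ~= 50 us between interrupts; feeding
 * 195 back through the companion macro recovers roughly 20000 ints/sec.
 */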
@@ -244,7 +244,7 @@ struct iavf_adapter {
int num_iwarp_msix;
int iwarp_base_vector;
u32 client_pending;
- struct i40e_client_instance *cinst;
+ struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
@@ -351,7 +351,7 @@ struct iavf_adapter {
/* Ethtool Private Flags */
/* lan device, used by client interface */
-struct i40e_device {
+struct iavf_device {
struct list_head list;
struct iavf_adapter *vf;
};
@@ -402,7 +402,7 @@ void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen);
+ enum iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index fca1ecfd9f71..9fa3fa99b4c2 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -4,16 +4,16 @@
#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_prototype.h"
/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
+ * iavf_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
-static void i40e_adminq_init_regs(struct iavf_hw *hw)
+static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
/* set head and tail registers in our local struct */
hw->aq.asq.tail = IAVF_VF_ATQT1;
@@ -29,24 +29,24 @@ static void i40e_adminq_init_regs(struct iavf_hw *hw)
}
/**
- * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
- i40e_mem_atq_ring,
+ iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_asq_cmd_details)));
+ sizeof(struct iavf_asq_cmd_details)));
if (ret_code) {
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
@@ -56,55 +56,55 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
}
/**
- * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
- i40e_mem_arq_ring,
+ iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
- * i40e_free_adminq_asq - Free Admin Queue send rings
+ * iavf_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_asq(struct iavf_hw *hw)
+static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
- * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * iavf_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_arq(struct iavf_hw *hw)
+static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
- * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct iavf_aq_desc *desc;
struct iavf_dma_mem *bi;
- iavf_status ret_code;
+ enum iavf_status ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -123,7 +123,7 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_arq_buf,
+ iavf_mem_arq_buf,
hw->aq.arq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
@@ -132,9 +132,9 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -165,13 +165,13 @@ unwind_alloc_arq_bufs:
}
/**
- * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
struct iavf_dma_mem *bi;
- iavf_status ret_code;
+ enum iavf_status ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
@@ -186,7 +186,7 @@ static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_asq_buf,
+ iavf_mem_asq_buf,
hw->aq.asq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
@@ -206,10 +206,10 @@ unwind_alloc_asq_bufs:
}
/**
- * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * iavf_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_arq_bufs(struct iavf_hw *hw)
+static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
int i;
@@ -225,10 +225,10 @@ static void i40e_free_arq_bufs(struct iavf_hw *hw)
}
/**
- * i40e_free_asq_bufs - Free send queue buffer info elements
+ * iavf_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_asq_bufs(struct iavf_hw *hw)
+static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
int i;
@@ -248,14 +248,14 @@ static void i40e_free_asq_bufs(struct iavf_hw *hw)
}
/**
- * i40e_config_asq_regs - configure ASQ registers
+ * iavf_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
-static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
+static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -271,20 +271,20 @@ static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
- * i40e_config_arq_regs - ARQ register configuration
+ * iavf_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
-static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
+static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -303,13 +303,13 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
- * i40e_init_asq - main initialization routine for ASQ
+ * iavf_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
@@ -321,20 +321,20 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static iavf_status i40e_init_asq(struct iavf_hw *hw)
+static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
@@ -342,17 +342,17 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw)
hw->aq.asq.next_to_clean = 0;
/* allocate the ring memory */
- ret_code = i40e_alloc_adminq_asq_ring(hw);
+ ret_code = iavf_alloc_adminq_asq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
- ret_code = i40e_alloc_asq_bufs(hw);
+ ret_code = iavf_alloc_asq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
- ret_code = i40e_config_asq_regs(hw);
+ ret_code = iavf_config_asq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
@@ -361,14 +361,14 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_rings:
- i40e_free_adminq_asq(hw);
+ iavf_free_adminq_asq(hw);
init_adminq_exit:
return ret_code;
}
/**
- * i40e_init_arq - initialize ARQ
+ * iavf_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
@@ -380,20 +380,20 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static iavf_status i40e_init_arq(struct iavf_hw *hw)
+static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
@@ -401,17 +401,17 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw)
hw->aq.arq.next_to_clean = 0;
/* allocate the ring memory */
- ret_code = i40e_alloc_adminq_arq_ring(hw);
+ ret_code = iavf_alloc_adminq_arq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
- ret_code = i40e_alloc_arq_bufs(hw);
+ ret_code = iavf_alloc_arq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
- ret_code = i40e_config_arq_regs(hw);
+ ret_code = iavf_config_arq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
@@ -420,26 +420,26 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_rings:
- i40e_free_adminq_arq(hw);
+ iavf_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
- * i40e_shutdown_asq - shutdown the ASQ
+ * iavf_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
-static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
+static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto shutdown_asq_out;
}
@@ -453,7 +453,7 @@ static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
- i40e_free_asq_bufs(hw);
+ iavf_free_asq_bufs(hw);
shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
@@ -461,19 +461,19 @@ shutdown_asq_out:
}
/**
- * i40e_shutdown_arq - shutdown ARQ
+ * iavf_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
-static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
+static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto shutdown_arq_out;
}
@@ -487,7 +487,7 @@ static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
- i40e_free_arq_bufs(hw);
+ iavf_free_arq_bufs(hw);
shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
@@ -505,32 +505,32 @@ shutdown_arq_out:
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-iavf_status iavf_init_adminq(struct iavf_hw *hw)
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
/* Set up register offsets */
- i40e_adminq_init_regs(hw);
+ iavf_adminq_init_regs(hw);
/* setup ASQ command write back timeout */
- hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
/* allocate the ASQ */
- ret_code = i40e_init_asq(hw);
+ ret_code = iavf_init_asq(hw);
if (ret_code)
goto init_adminq_destroy_locks;
/* allocate the ARQ */
- ret_code = i40e_init_arq(hw);
+ ret_code = iavf_init_arq(hw);
if (ret_code)
goto init_adminq_free_asq;
@@ -538,7 +538,7 @@ iavf_status iavf_init_adminq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_asq:
- i40e_shutdown_asq(hw);
+ iavf_shutdown_asq(hw);
init_adminq_destroy_locks:
init_adminq_exit:
@@ -549,53 +549,53 @@ init_adminq_exit:
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
- i40e_shutdown_asq(hw);
- i40e_shutdown_arq(hw);
+ iavf_shutdown_asq(hw);
+ iavf_shutdown_arq(hw);
return ret_code;
}
/**
- * i40e_clean_asq - cleans Admin send queue
+ * iavf_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
 * returns the number of free descriptors
**/
-static u16 i40e_clean_asq(struct iavf_hw *hw)
+static u16 iavf_clean_asq(struct iavf_hw *hw)
{
struct iavf_adminq_ring *asq = &hw->aq.asq;
- struct i40e_asq_cmd_details *details;
+ struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct iavf_aq_desc desc_cb;
+ struct iavf_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
- I40E_ADMINQ_CALLBACK cb_func =
- (I40E_ADMINQ_CALLBACK)details->callback;
+ IAVF_ADMINQ_CALLBACK cb_func =
+ (IAVF_ADMINQ_CALLBACK)details->callback;
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ sizeof(struct iavf_asq_cmd_details));
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
@@ -629,16 +629,17 @@ bool iavf_asq_done(struct iavf_hw *hw)
* This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
**/
-iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
- struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct iavf_asq_cmd_details *details;
+ struct iavf_aq_desc *desc_on_ring;
bool cmd_completed = false;
- iavf_status status = 0;
+ enum iavf_status status = 0;
u16 retval = 0;
u32 val = 0;
@@ -647,21 +648,21 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (hw->aq.asq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
- status = I40E_ERR_QUEUE_EMPTY;
+ status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = IAVF_AQ_RC_OK;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
- details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -676,7 +677,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
cpu_to_le32(lower_32_bits(details->cookie));
}
} else {
- memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ memset(details, 0, sizeof(struct iavf_asq_cmd_details));
}
/* clear requested flags and then set additional flags if defined */
@@ -688,7 +689,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
- status = I40E_ERR_INVALID_SIZE;
+ status = IAVF_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
@@ -696,7 +697,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
- status = I40E_ERR_PARAM;
+ status = IAVF_ERR_PARAM;
goto asq_send_command_error;
}
@@ -707,11 +708,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
- if (i40e_clean_asq(hw) == 0) {
+ if (iavf_clean_asq(hw) == 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
- status = I40E_ERR_ADMIN_QUEUE_FULL;
+ status = IAVF_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
@@ -780,13 +781,13 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
- status = I40E_ERR_NOT_READY;
+ else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ status = IAVF_ERR_NOT_READY;
else
- status = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -803,11 +804,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
- status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
@@ -823,12 +824,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
/**
@@ -841,13 +842,13 @@ void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
- iavf_status ret_code = 0;
+ struct iavf_aq_desc *desc;
+ enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
u16 datalen;
@@ -863,7 +864,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
if (hw->aq.arq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
- ret_code = I40E_ERR_QUEUE_EMPTY;
+ ret_code = IAVF_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
@@ -871,7 +872,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -880,10 +881,10 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ if (flags & IAVF_AQ_FLAG_ERR) {
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
@@ -906,11 +907,11 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index ee983889eab0..baf2fe26f302 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -6,10 +6,10 @@
#include "iavf_osdep.h"
#include "iavf_status.h"
-#include "i40e_adminq_cmd.h"
+#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,22 +39,22 @@ struct iavf_adminq_ring {
};
/* ASQ transaction details */
-struct i40e_asq_cmd_details {
- void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+struct iavf_asq_cmd_details {
+ void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct iavf_aq_desc *wb_desc;
};
-#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+#define IAVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
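+/* A minimal indexing sketch (illustrative only): both macros overlay an
+ * array of fixed-size records on a ring's flat allocation, e.g.
+ *
+ *	struct iavf_aq_desc *desc = IAVF_ADMINQ_DESC(hw->aq.asq, i);
+ *	struct iavf_asq_cmd_details *dtl = IAVF_ADMINQ_DETAILS(hw->aq.asq, i);
+ */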
/* ARQ event information */
-struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+struct iavf_arq_event_info {
+ struct iavf_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -79,45 +79,45 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum iavf_admin_queue_err asq_last_status;
+ enum iavf_admin_queue_err arq_last_status;
};
/**
- * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * iavf_aq_rc_to_posix - convert errors to user-land codes
* aq_ret: AdminQ handler error code can override aq_rc
* aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
- 0, /* I40E_AQ_RC_OK */
- -EPERM, /* I40E_AQ_RC_EPERM */
- -ENOENT, /* I40E_AQ_RC_ENOENT */
- -ESRCH, /* I40E_AQ_RC_ESRCH */
- -EINTR, /* I40E_AQ_RC_EINTR */
- -EIO, /* I40E_AQ_RC_EIO */
- -ENXIO, /* I40E_AQ_RC_ENXIO */
- -E2BIG, /* I40E_AQ_RC_E2BIG */
- -EAGAIN, /* I40E_AQ_RC_EAGAIN */
- -ENOMEM, /* I40E_AQ_RC_ENOMEM */
- -EACCES, /* I40E_AQ_RC_EACCES */
- -EFAULT, /* I40E_AQ_RC_EFAULT */
- -EBUSY, /* I40E_AQ_RC_EBUSY */
- -EEXIST, /* I40E_AQ_RC_EEXIST */
- -EINVAL, /* I40E_AQ_RC_EINVAL */
- -ENOTTY, /* I40E_AQ_RC_ENOTTY */
- -ENOSPC, /* I40E_AQ_RC_ENOSPC */
- -ENOSYS, /* I40E_AQ_RC_ENOSYS */
- -ERANGE, /* I40E_AQ_RC_ERANGE */
- -EPIPE, /* I40E_AQ_RC_EFLUSHED */
- -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
- -EROFS, /* I40E_AQ_RC_EMODE */
- -EFBIG, /* I40E_AQ_RC_EFBIG */
+ 0, /* IAVF_AQ_RC_OK */
+ -EPERM, /* IAVF_AQ_RC_EPERM */
+ -ENOENT, /* IAVF_AQ_RC_ENOENT */
+ -ESRCH, /* IAVF_AQ_RC_ESRCH */
+ -EINTR, /* IAVF_AQ_RC_EINTR */
+ -EIO, /* IAVF_AQ_RC_EIO */
+ -ENXIO, /* IAVF_AQ_RC_ENXIO */
+ -E2BIG, /* IAVF_AQ_RC_E2BIG */
+ -EAGAIN, /* IAVF_AQ_RC_EAGAIN */
+ -ENOMEM, /* IAVF_AQ_RC_ENOMEM */
+ -EACCES, /* IAVF_AQ_RC_EACCES */
+ -EFAULT, /* IAVF_AQ_RC_EFAULT */
+ -EBUSY, /* IAVF_AQ_RC_EBUSY */
+ -EEXIST, /* IAVF_AQ_RC_EEXIST */
+ -EINVAL, /* IAVF_AQ_RC_EINVAL */
+ -ENOTTY, /* IAVF_AQ_RC_ENOTTY */
+ -ENOSPC, /* IAVF_AQ_RC_ENOSPC */
+ -ENOSYS, /* IAVF_AQ_RC_ENOSYS */
+ -ERANGE, /* IAVF_AQ_RC_ERANGE */
+ -EPIPE, /* IAVF_AQ_RC_EFLUSHED */
+ -ESPIPE, /* IAVF_AQ_RC_BAD_ADDR */
+ -EROFS, /* IAVF_AQ_RC_EMODE */
+ -EFBIG, /* IAVF_AQ_RC_EFBIG */
};
/* aq_rc is invalid if AQ timed out */
- if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ if (aq_ret == IAVF_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
@@ -127,9 +127,9 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
}
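/* A usage sketch (assumed caller, not part of this header): translating
 * the last AdminQ firmware return code into an errno for user space,
 *
 *	err = iavf_aq_rc_to_posix(status, hw->aq.asq_last_status);
 *
 * maps IAVF_AQ_RC_EBUSY to -EBUSY and a timed-out queue to -EAGAIN.
 */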
/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
+#define IAVF_AQ_LARGE_BUF 512
+#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
new file mode 100644
index 000000000000..bc512308557b
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_ADMINQ_CMD_H_
+#define _IAVF_ADMINQ_CMD_H_
+
+/* This header file defines the iavf Admin Queue commands and is shared between
+ * iavf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define IAVF_FW_API_VERSION_MAJOR 0x0001
+#define IAVF_FW_API_VERSION_MINOR_X722 0x0005
+#define IAVF_FW_API_VERSION_MINOR_X710 0x0008
+
+#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
+ IAVF_FW_API_VERSION_MINOR_X710 : \
+ IAVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct iavf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IAVF_AQ_FLAG_DD_SHIFT 0
+#define IAVF_AQ_FLAG_CMP_SHIFT 1
+#define IAVF_AQ_FLAG_ERR_SHIFT 2
+#define IAVF_AQ_FLAG_VFE_SHIFT 3
+#define IAVF_AQ_FLAG_LB_SHIFT 9
+#define IAVF_AQ_FLAG_RD_SHIFT 10
+#define IAVF_AQ_FLAG_VFC_SHIFT 11
+#define IAVF_AQ_FLAG_BUF_SHIFT 12
+#define IAVF_AQ_FLAG_SI_SHIFT 13
+#define IAVF_AQ_FLAG_EI_SHIFT 14
+#define IAVF_AQ_FLAG_FE_SHIFT 15
+
+#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
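+/* A composition sketch (illustrative): a command carrying a 1024-byte
+ * buffer that firmware must read would set
+ *
+ *	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD |
+ *				  IAVF_AQ_FLAG_LB | IAVF_AQ_FLAG_SI);
+ *
+ * BUF marks the buffer as present, RD that firmware reads it, and LB
+ * that it exceeds the 512-byte IAVF_AQ_LARGE_BUF threshold.
+ */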
+
+/* error codes */
+enum iavf_admin_queue_err {
+ IAVF_AQ_RC_OK = 0, /* success */
+ IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ IAVF_AQ_RC_ENOENT = 2, /* No such element */
+ IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ IAVF_AQ_RC_EIO = 5, /* I/O error */
+ IAVF_AQ_RC_ENXIO = 6, /* No such resource */
+ IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ IAVF_AQ_RC_EAGAIN = 8, /* Try again */
+ IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ IAVF_AQ_RC_EACCES = 10, /* Permission denied */
+ IAVF_AQ_RC_EFAULT = 11, /* Bad address */
+ IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ IAVF_AQ_RC_EEXIST = 13, /* object already exists */
+ IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IAVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum iavf_admin_queue_opc {
+ /* aq commands */
+ iavf_aqc_opc_get_version = 0x0001,
+ iavf_aqc_opc_driver_version = 0x0002,
+ iavf_aqc_opc_queue_shutdown = 0x0003,
+ iavf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ iavf_aqc_opc_request_resource = 0x0008,
+ iavf_aqc_opc_release_resource = 0x0009,
+
+ iavf_aqc_opc_list_func_capabilities = 0x000A,
+ iavf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ iavf_aqc_opc_set_proxy_config = 0x0104,
+ iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ iavf_aqc_opc_mac_address_read = 0x0107,
+ iavf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ iavf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ iavf_aqc_opc_set_wol_filter = 0x0120,
+ iavf_aqc_opc_get_wake_reason = 0x0121,
+
+ /* internal switch commands */
+ iavf_aqc_opc_get_switch_config = 0x0200,
+ iavf_aqc_opc_add_statistics = 0x0201,
+ iavf_aqc_opc_remove_statistics = 0x0202,
+ iavf_aqc_opc_set_port_parameters = 0x0203,
+ iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ iavf_aqc_opc_set_switch_config = 0x0205,
+ iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ iavf_aqc_opc_add_vsi = 0x0210,
+ iavf_aqc_opc_update_vsi_parameters = 0x0211,
+ iavf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ iavf_aqc_opc_add_pv = 0x0220,
+ iavf_aqc_opc_update_pv_parameters = 0x0221,
+ iavf_aqc_opc_get_pv_parameters = 0x0222,
+
+ iavf_aqc_opc_add_veb = 0x0230,
+ iavf_aqc_opc_update_veb_parameters = 0x0231,
+ iavf_aqc_opc_get_veb_parameters = 0x0232,
+
+ iavf_aqc_opc_delete_element = 0x0243,
+
+ iavf_aqc_opc_add_macvlan = 0x0250,
+ iavf_aqc_opc_remove_macvlan = 0x0251,
+ iavf_aqc_opc_add_vlan = 0x0252,
+ iavf_aqc_opc_remove_vlan = 0x0253,
+ iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ iavf_aqc_opc_add_tag = 0x0255,
+ iavf_aqc_opc_remove_tag = 0x0256,
+ iavf_aqc_opc_add_multicast_etag = 0x0257,
+ iavf_aqc_opc_remove_multicast_etag = 0x0258,
+ iavf_aqc_opc_update_tag = 0x0259,
+ iavf_aqc_opc_add_control_packet_filter = 0x025A,
+ iavf_aqc_opc_remove_control_packet_filter = 0x025B,
+ iavf_aqc_opc_add_cloud_filters = 0x025C,
+ iavf_aqc_opc_remove_cloud_filters = 0x025D,
+ iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
+
+ iavf_aqc_opc_add_mirror_rule = 0x0260,
+ iavf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ iavf_aqc_opc_write_personalization_profile = 0x0270,
+ iavf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ iavf_aqc_opc_dcb_updated = 0x0302,
+ iavf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ iavf_aqc_opc_query_vsi_bw_config = 0x0408,
+ iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ iavf_aqc_opc_query_port_ets_config = 0x0419,
+ iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ iavf_aqc_opc_suspend_port_tx = 0x041B,
+ iavf_aqc_opc_resume_port_tx = 0x041C,
+ iavf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ iavf_aqc_opc_get_phy_abilities = 0x0600,
+ iavf_aqc_opc_set_phy_config = 0x0601,
+ iavf_aqc_opc_set_mac_config = 0x0603,
+ iavf_aqc_opc_set_link_restart_an = 0x0605,
+ iavf_aqc_opc_get_link_status = 0x0607,
+ iavf_aqc_opc_set_phy_int_mask = 0x0613,
+ iavf_aqc_opc_get_local_advt_reg = 0x0614,
+ iavf_aqc_opc_set_local_advt_reg = 0x0615,
+ iavf_aqc_opc_get_partner_advt = 0x0616,
+ iavf_aqc_opc_set_lb_modes = 0x0618,
+ iavf_aqc_opc_get_phy_wol_caps = 0x0621,
+ iavf_aqc_opc_set_phy_debug = 0x0622,
+ iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ iavf_aqc_opc_run_phy_activity = 0x0626,
+ iavf_aqc_opc_set_phy_register = 0x0628,
+ iavf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ iavf_aqc_opc_nvm_read = 0x0701,
+ iavf_aqc_opc_nvm_erase = 0x0702,
+ iavf_aqc_opc_nvm_update = 0x0703,
+ iavf_aqc_opc_nvm_config_read = 0x0704,
+ iavf_aqc_opc_nvm_config_write = 0x0705,
+ iavf_aqc_opc_oem_post_update = 0x0720,
+ iavf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ iavf_aqc_opc_send_msg_to_pf = 0x0801,
+ iavf_aqc_opc_send_msg_to_vf = 0x0802,
+ iavf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ iavf_aqc_opc_alternate_write = 0x0900,
+ iavf_aqc_opc_alternate_write_indirect = 0x0901,
+ iavf_aqc_opc_alternate_read = 0x0902,
+ iavf_aqc_opc_alternate_read_indirect = 0x0903,
+ iavf_aqc_opc_alternate_write_done = 0x0904,
+ iavf_aqc_opc_alternate_set_mode = 0x0905,
+ iavf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ iavf_aqc_opc_lldp_get_mib = 0x0A00,
+ iavf_aqc_opc_lldp_update_mib = 0x0A01,
+ iavf_aqc_opc_lldp_add_tlv = 0x0A02,
+ iavf_aqc_opc_lldp_update_tlv = 0x0A03,
+ iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ iavf_aqc_opc_lldp_stop = 0x0A05,
+ iavf_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ iavf_aqc_opc_add_udp_tunnel = 0x0B00,
+ iavf_aqc_opc_del_udp_tunnel = 0x0B01,
+ iavf_aqc_opc_set_rss_key = 0x0B02,
+ iavf_aqc_opc_set_rss_lut = 0x0B03,
+ iavf_aqc_opc_get_rss_key = 0x0B04,
+ iavf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ iavf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ iavf_aqc_opc_oem_parameter_change = 0xFE00,
+ iavf_aqc_opc_oem_device_status_change = 0xFE01,
+ iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ iavf_aqc_opc_debug_read_reg = 0xFF03,
+ iavf_aqc_opc_debug_write_reg = 0xFF04,
+ iavf_aqc_opc_debug_modify_reg = 0xFF07,
+ iavf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
+ { iavf_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
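+/* An expansion sketch: IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown)
+ * declares an enum whose initializer divides by zero unless
+ * sizeof(struct iavf_aqc_queue_shutdown) == 16, turning a mis-sized
+ * command structure into a compile-time error rather than a silently
+ * corrupted descriptor.
+ */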
+
+/* Queue Shutdown (direct 0x0003) */
+struct iavf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define IAVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
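+/* A send sketch (assumed caller, modelled on the driver's queue-shutdown
+ * path; nothing below is defined in this header beyond the command):
+ *
+ *	struct iavf_aq_desc desc;
+ *	struct iavf_aqc_queue_shutdown *cmd =
+ *		(struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ *
+ *	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
+ *	cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING);
+ *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */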
+
+struct iavf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
+#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses iavf_aqc_switch_seid for the descriptor
+ */
+struct iavf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
+
+#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
+#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
+#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
+#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
+#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
+
+enum iavf_aq_link_speed {
+ IAVF_LINK_SPEED_UNKNOWN = 0,
+ IAVF_LINK_SPEED_100MB = BIT(IAVF_LINK_SPEED_100MB_SHIFT),
+ IAVF_LINK_SPEED_1GB = BIT(IAVF_LINK_SPEED_1000MB_SHIFT),
+ IAVF_LINK_SPEED_10GB = BIT(IAVF_LINK_SPEED_10GB_SHIFT),
+ IAVF_LINK_SPEED_40GB = BIT(IAVF_LINK_SPEED_40GB_SHIFT),
+ IAVF_LINK_SPEED_20GB = BIT(IAVF_LINK_SPEED_20GB_SHIFT),
+ IAVF_LINK_SPEED_25GB = BIT(IAVF_LINK_SPEED_25GB_SHIFT),
+};
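+/* These values are one-hot bits rather than an ordinal scale, so a link
+ * capabilities field can OR several of them together; IAVF_LINK_SPEED_10GB,
+ * for example, is BIT(0x3) == 0x08.
+ */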
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct iavf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
+
+struct iavf_aqc_get_set_rss_key {
+#define IAVF_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
+
+struct iavf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
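+/* An indirect-command sketch (assumed caller): the 52-byte key travels in
+ * an external struct iavf_aqc_get_set_rss_key_data buffer whose DMA
+ * address the send path writes into the descriptor, so the command itself
+ * only needs the VSI id:
+ *
+ *	cmd->vsi_id = cpu_to_le16((vsi_id &
+ *			IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK) |
+ *			IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+ *	status = iavf_asq_send_command(hw, &desc, key, sizeof(*key), NULL);
+ */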
+
+struct iavf_aqc_get_set_rss_lut {
+#define IAVF_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
+#endif /* _IAVF_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
index bf2753146f30..2711573c14ec 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_alloc.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
@@ -20,12 +20,15 @@ enum iavf_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
- enum iavf_memory_type type,
- u64 size, u32 alignment);
-iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem);
-iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
- struct iavf_virt_mem *mem, u32 size);
-iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);
+enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem);
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size);
+enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem);
#endif /* _IAVF_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index aea45364fd1c..0c77e4171808 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -10,19 +10,19 @@
static
const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
-static struct i40e_client *vf_registered_client;
-static LIST_HEAD(i40e_devices);
+static struct iavf_client *vf_registered_client;
+static LIST_HEAD(iavf_devices);
static DEFINE_MUTEX(iavf_device_mutex);
-static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
+static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len);
-static int iavf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info);
+static int iavf_client_setup_qvlist(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_qvlist_info *qvlist_info);
-static struct i40e_ops iavf_lan_ops = {
+static struct iavf_ops iavf_lan_ops = {
.virtchnl_send = iavf_client_virtchnl_send,
.setup_qvlist = iavf_client_setup_qvlist,
};
@@ -33,11 +33,11 @@ static struct i40e_ops iavf_lan_ops = {
* @params: client param struct
**/
static
-void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
+void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params)
{
int i;
- memset(params, 0, sizeof(struct i40e_params));
+ memset(params, 0, sizeof(struct iavf_params));
params->mtu = vsi->netdev->mtu;
params->link_up = vsi->back->link_up;
@@ -57,7 +57,7 @@ void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
**/
void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
{
- struct i40e_client_instance *cinst;
+ struct iavf_client_instance *cinst;
if (!vsi)
return;
@@ -81,8 +81,8 @@ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
**/
void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
{
- struct i40e_client_instance *cinst;
- struct i40e_params params;
+ struct iavf_client_instance *cinst;
+ struct iavf_params params;
if (!vsi)
return;
@@ -110,7 +110,7 @@ void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
void iavf_notify_client_open(struct iavf_vsi *vsi)
{
struct iavf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
+ struct iavf_client_instance *cinst = adapter->cinst;
int ret;
if (!cinst || !cinst->client || !cinst->client->ops ||
@@ -119,10 +119,10 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
"Cannot locate client instance open function\n");
return;
}
- if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) {
ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
}
}
@@ -132,17 +132,17 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
*
* Return 0 on success or < 0 on error
**/
-static int iavf_client_release_qvlist(struct i40e_info *ldev)
+static int iavf_client_release_qvlist(struct iavf_info *ldev)
{
struct iavf_adapter *adapter = ldev->vf;
- iavf_status err;
+ enum iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
- I40E_SUCCESS, NULL, 0, NULL);
+ IAVF_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
@@ -162,7 +162,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev)
void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
{
struct iavf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
+ struct iavf_client_instance *cinst = adapter->cinst;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->close) {
@@ -172,7 +172,7 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
}
cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
}
/**
@@ -181,13 +181,13 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
*
* Returns cinst ptr on success, NULL on failure
**/
-static struct i40e_client_instance *
+static struct iavf_client_instance *
iavf_client_add_instance(struct iavf_adapter *adapter)
{
- struct i40e_client_instance *cinst = NULL;
+ struct iavf_client_instance *cinst = NULL;
struct iavf_vsi *vsi = &adapter->vsi;
struct netdev_hw_addr *mac = NULL;
- struct i40e_params params;
+ struct iavf_params params;
if (!vf_registered_client)
goto out;
@@ -205,7 +205,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.netdev = vsi->netdev;
cinst->lan_info.pcidev = adapter->pdev;
cinst->lan_info.fid = 0;
- cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF;
cinst->lan_info.hw_addr = adapter->hw.hw_addr;
cinst->lan_info.ops = &iavf_lan_ops;
cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR;
@@ -213,7 +213,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD;
iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
- set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+ set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
@@ -250,8 +250,8 @@ void iavf_client_del_instance(struct iavf_adapter *adapter)
**/
void iavf_client_subtask(struct iavf_adapter *adapter)
{
- struct i40e_client *client = vf_registered_client;
- struct i40e_client_instance *cinst;
+ struct iavf_client *client = vf_registered_client;
+ struct iavf_client_instance *cinst;
int ret = 0;
if (adapter->state < __IAVF_DOWN)
@@ -269,13 +269,13 @@ void iavf_client_subtask(struct iavf_adapter *adapter)
dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
client->name);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
/* Send an Open request to the client */
if (client->ops && client->ops->open)
ret = client->ops->open(&cinst->lan_info, client);
if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ set_bit(__IAVF_CLIENT_INSTANCE_OPENED,
&cinst->state);
else
/* remove client instance */
@@ -291,11 +291,11 @@ void iavf_client_subtask(struct iavf_adapter *adapter)
**/
int iavf_lan_add_device(struct iavf_adapter *adapter)
{
- struct i40e_device *ldev;
+ struct iavf_device *ldev;
int ret = 0;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
if (ldev->vf == adapter) {
ret = -EEXIST;
goto out;
@@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)
}
ldev->vf = adapter;
INIT_LIST_HEAD(&ldev->list);
- list_add(&ldev->list, &i40e_devices);
+ list_add(&ldev->list, &iavf_devices);
dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
adapter->hw.bus.bus_id, adapter->hw.bus.device,
adapter->hw.bus.func);
@@ -331,11 +331,11 @@ out:
**/
int iavf_lan_del_device(struct iavf_adapter *adapter)
{
- struct i40e_device *ldev, *tmp;
+ struct iavf_device *ldev, *tmp;
int ret = -ENODEV;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+ list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) {
if (ldev->vf == adapter) {
dev_info(&adapter->pdev->dev,
"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
@@ -357,24 +357,24 @@ int iavf_lan_del_device(struct iavf_adapter *adapter)
* @client: pointer to the registered client
*
**/
-static void iavf_client_release(struct i40e_client *client)
+static void iavf_client_release(struct iavf_client *client)
{
- struct i40e_client_instance *cinst;
- struct i40e_device *ldev;
+ struct iavf_client_instance *cinst;
+ struct iavf_device *ldev;
struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
cinst = adapter->cinst;
if (!cinst)
continue;
- if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cinst->lan_info, client,
false);
iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
dev_warn(&adapter->pdev->dev,
"Client %s instance closed\n", client->name);
@@ -392,13 +392,13 @@ static void iavf_client_release(struct i40e_client *client)
* @client: pointer to the registered client
*
**/
-static void iavf_client_prepare(struct i40e_client *client)
+static void iavf_client_prepare(struct iavf_client *client)
{
- struct i40e_device *ldev;
+ struct iavf_device *ldev;
struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
/* Signal the watchdog to service the client */
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
@@ -415,18 +415,18 @@ static void iavf_client_prepare(struct i40e_client *client)
*
* Return 0 on success or < 0 on error
**/
-static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
+static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len)
{
struct iavf_adapter *adapter = ldev->vf;
- iavf_status err;
+ enum iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
- I40E_SUCCESS, msg, len, NULL);
+ IAVF_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
@@ -442,16 +442,16 @@ static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
*
* Return 0 on success or < 0 on error
**/
-static int iavf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info)
+static int iavf_client_setup_qvlist(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_qvlist_info *qvlist_info)
{
struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
- struct i40e_qv_info *qv_info;
- iavf_status err;
+ struct iavf_qv_info *qv_info;
+ enum iavf_status err;
u32 v_idx, i;
- u32 msg_size;
+ size_t msg_size;
if (adapter->aq_required)
return -EAGAIN;
@@ -469,13 +469,12 @@ static int iavf_client_setup_qvlist(struct i40e_info *ldev,
}
v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
- msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
- (sizeof(struct virtchnl_iwarp_qv_info) *
- (v_qvlist_info->num_vectors - 1));
+ msg_size = struct_size(v_qvlist_info, qv_info,
+ v_qvlist_info->num_vectors - 1);
adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
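The struct_size() conversion in the hunk above deserves a note: for a struct that ends in a one-element array, struct_size(p, member, n) evaluates to sizeof(*p) + n * sizeof(p->member[0]) with overflow checking, so passing num_vectors - 1 accounts for the element already counted inside the struct itself. A minimal sketch under that assumption (demo_qvlist and demo_size are illustrative names, not driver code):

#include <linux/overflow.h>
#include <linux/types.h>

/* hypothetical mirror of the virtchnl layout, for illustration only */
struct demo_qvlist {
	u32 num_vectors;
	struct virtchnl_iwarp_qv_info qv_info[1];	/* counted in sizeof */
};

static size_t demo_size(const struct demo_qvlist *p)
{
	/* sizeof(*p) + (num_vectors - 1) * sizeof(p->qv_info[0]);
	 * saturates to SIZE_MAX on overflow instead of wrapping
	 */
	return struct_size(p, qv_info, p->num_vectors - 1);
}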
@@ -499,12 +498,12 @@ out:
}
/**
- * iavf_register_client - Register a i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
+ * iavf_register_client - Register an iavf client driver with the L2 driver
+ * @client: pointer to the iavf_client struct
*
* Returns 0 on success or non-0 on error
**/
-int iavf_register_client(struct i40e_client *client)
+int iavf_register_client(struct iavf_client *client)
{
int ret = 0;
@@ -550,12 +549,12 @@ out:
EXPORT_SYMBOL(iavf_register_client);
/**
- * iavf_unregister_client - Unregister a i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
+ * iavf_unregister_client - Unregister an iavf client driver with the L2 driver
+ * @client: pointer to the iavf_client struct
*
* Returns 0 on success or non-0 on error
**/
-int iavf_unregister_client(struct i40e_client *client)
+int iavf_unregister_client(struct iavf_client *client)
{
int ret = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index e216fc9dfd81..9a7cf39ea75a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -17,86 +17,86 @@
__stringify(IAVF_CLIENT_VERSION_MINOR) "." \
__stringify(IAVF_CLIENT_VERSION_BUILD)
-struct i40e_client_version {
+struct iavf_client_version {
u8 major;
u8 minor;
u8 build;
u8 rsvd;
};
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
+enum iavf_client_state {
+ __IAVF_CLIENT_NULL,
+ __IAVF_CLIENT_REGISTERED
};
-enum i40e_client_instance_state {
- __I40E_CLIENT_INSTANCE_NONE,
- __I40E_CLIENT_INSTANCE_OPENED,
+enum iavf_client_instance_state {
+ __IAVF_CLIENT_INSTANCE_NONE,
+ __IAVF_CLIENT_INSTANCE_OPENED,
};
-struct i40e_ops;
-struct i40e_client;
+struct iavf_ops;
+struct iavf_client;
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
+#define IAVF_QUEUE_TYPE_PE_AEQ 0x80
+#define IAVF_QUEUE_INVALID_IDX 0xFFFF
-struct i40e_qv_info {
+struct iavf_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
-struct i40e_qvlist_info {
+struct iavf_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct iavf_qv_info qv_info[1];
};
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF
/* set of LAN parameters useful for clients managed by LAN */
/* Struct to hold per priority info */
-struct i40e_prio_qos_params {
+struct iavf_prio_qos_params {
u16 qs_handle; /* qs handle for prio */
u8 tc; /* TC mapped to prio */
u8 reserved;
};
-#define I40E_CLIENT_MAX_USER_PRIORITY 8
+#define IAVF_CLIENT_MAX_USER_PRIORITY 8
/* Struct to hold Client QoS */
-struct i40e_qos_params {
- struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+struct iavf_qos_params {
+ struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY];
};
-struct i40e_params {
- struct i40e_qos_params qos;
+struct iavf_params {
+ struct iavf_qos_params qos;
u16 mtu;
u16 link_up; /* boolean */
};
/* Structure to hold LAN device info for a client device */
-struct i40e_info {
- struct i40e_client_version version;
+struct iavf_info {
+ struct iavf_client_version version;
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
-#define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
+#define IAVF_CLIENT_FTYPE_PF 0
+#define IAVF_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
void *vf; /* cast to iavf_adapter */
/* All L2 params that could change during the life span of the device
* and need to be communicated to the client when they change
*/
- struct i40e_params params;
- struct i40e_ops *ops;
+ struct iavf_params params;
+ struct iavf_ops *ops;
u16 msix_count; /* number of msix vectors */
/* Array down below will be dynamically allocated based on msix_count */
@@ -104,66 +104,66 @@ struct i40e_info {
u16 itr_index; /* Which ITR index the PE driver is supposed to use */
};
-struct i40e_ops {
+struct iavf_ops {
/* setup_q_vector_list enables queues with a particular vector */
- int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
- struct i40e_qvlist_info *qv_info);
+ int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client,
+ struct iavf_qvlist_info *qv_info);
- u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client,
u8 *msg, u16 len);
/* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
- void (*request_reset)(struct i40e_info *ldev,
- struct i40e_client *client);
+ void (*request_reset)(struct iavf_info *ldev,
+ struct iavf_client *client);
};
-struct i40e_client_ops {
+struct iavf_client_ops {
/* Should be called from register_client() or whenever the driver is
* ready to create a specific client instance.
*/
- int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+ int (*open)(struct iavf_info *ldev, struct iavf_client *client);
/* Should be closed when netdev is unavailable or when unregister
* call comes in. If the close happens due to a reset, set the reset
* bit to true.
*/
- void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ void (*close)(struct iavf_info *ldev, struct iavf_client *client,
bool reset);
/* called when some l2 managed parameters change - mss */
- void (*l2_param_change)(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_params *params);
+ void (*l2_param_change)(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_params *params);
/* called when a message is received from the PF */
- int (*virtchnl_receive)(struct i40e_info *ldev,
- struct i40e_client *client,
+ int (*virtchnl_receive)(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len);
};
/* Client device */
-struct i40e_client_instance {
+struct iavf_client_instance {
struct list_head list;
- struct i40e_info lan_info;
- struct i40e_client *client;
+ struct iavf_info lan_info;
+ struct iavf_client *client;
unsigned long state;
};
-struct i40e_client {
+struct iavf_client {
struct list_head list; /* list of registered clients */
char name[IAVF_CLIENT_STR_LENGTH];
- struct i40e_client_version version;
+ struct iavf_client_version version;
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
-#define I40E_CLIENT_IWARP 0
- struct i40e_client_ops *ops; /* client ops provided by the client */
+#define IAVF_CLIENT_IWARP 0
+ struct iavf_client_ops *ops; /* client ops provided by the client */
};
/* used by clients */
-int iavf_register_client(struct i40e_client *client);
-int iavf_unregister_client(struct i40e_client *client);
+int iavf_register_client(struct iavf_client *client);
+int iavf_unregister_client(struct iavf_client *client);
#endif /* _IAVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 768369c89e77..8547fc8fdfd6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -2,7 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "iavf_type.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_prototype.h"
#include <linux/avf/virtchnl.h>
@@ -13,9 +13,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
{
- iavf_status status = 0;
+ enum iavf_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -32,7 +32,7 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw)
break;
}
} else {
- status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
}
hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
@@ -44,55 +44,55 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw)
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
**/
-const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
{
switch (aq_err) {
- case I40E_AQ_RC_OK:
+ case IAVF_AQ_RC_OK:
return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
+ case IAVF_AQ_RC_EPERM:
+ return "IAVF_AQ_RC_EPERM";
+ case IAVF_AQ_RC_ENOENT:
+ return "IAVF_AQ_RC_ENOENT";
+ case IAVF_AQ_RC_ESRCH:
+ return "IAVF_AQ_RC_ESRCH";
+ case IAVF_AQ_RC_EINTR:
+ return "IAVF_AQ_RC_EINTR";
+ case IAVF_AQ_RC_EIO:
+ return "IAVF_AQ_RC_EIO";
+ case IAVF_AQ_RC_ENXIO:
+ return "IAVF_AQ_RC_ENXIO";
+ case IAVF_AQ_RC_E2BIG:
+ return "IAVF_AQ_RC_E2BIG";
+ case IAVF_AQ_RC_EAGAIN:
+ return "IAVF_AQ_RC_EAGAIN";
+ case IAVF_AQ_RC_ENOMEM:
+ return "IAVF_AQ_RC_ENOMEM";
+ case IAVF_AQ_RC_EACCES:
+ return "IAVF_AQ_RC_EACCES";
+ case IAVF_AQ_RC_EFAULT:
+ return "IAVF_AQ_RC_EFAULT";
+ case IAVF_AQ_RC_EBUSY:
+ return "IAVF_AQ_RC_EBUSY";
+ case IAVF_AQ_RC_EEXIST:
+ return "IAVF_AQ_RC_EEXIST";
+ case IAVF_AQ_RC_EINVAL:
+ return "IAVF_AQ_RC_EINVAL";
+ case IAVF_AQ_RC_ENOTTY:
+ return "IAVF_AQ_RC_ENOTTY";
+ case IAVF_AQ_RC_ENOSPC:
+ return "IAVF_AQ_RC_ENOSPC";
+ case IAVF_AQ_RC_ENOSYS:
+ return "IAVF_AQ_RC_ENOSYS";
+ case IAVF_AQ_RC_ERANGE:
+ return "IAVF_AQ_RC_ERANGE";
+ case IAVF_AQ_RC_EFLUSHED:
+ return "IAVF_AQ_RC_EFLUSHED";
+ case IAVF_AQ_RC_BAD_ADDR:
+ return "IAVF_AQ_RC_BAD_ADDR";
+ case IAVF_AQ_RC_EMODE:
+ return "IAVF_AQ_RC_EMODE";
+ case IAVF_AQ_RC_EFBIG:
+ return "IAVF_AQ_RC_EFBIG";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
@@ -104,143 +104,143 @@ const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err)
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
-const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err)
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
{
switch (stat_err) {
case 0:
return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ case IAVF_ERR_NVM:
+ return "IAVF_ERR_NVM";
+ case IAVF_ERR_NVM_CHECKSUM:
+ return "IAVF_ERR_NVM_CHECKSUM";
+ case IAVF_ERR_PHY:
+ return "IAVF_ERR_PHY";
+ case IAVF_ERR_CONFIG:
+ return "IAVF_ERR_CONFIG";
+ case IAVF_ERR_PARAM:
+ return "IAVF_ERR_PARAM";
+ case IAVF_ERR_MAC_TYPE:
+ return "IAVF_ERR_MAC_TYPE";
+ case IAVF_ERR_UNKNOWN_PHY:
+ return "IAVF_ERR_UNKNOWN_PHY";
+ case IAVF_ERR_LINK_SETUP:
+ return "IAVF_ERR_LINK_SETUP";
+ case IAVF_ERR_ADAPTER_STOPPED:
+ return "IAVF_ERR_ADAPTER_STOPPED";
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ return "IAVF_ERR_INVALID_MAC_ADDR";
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
+ case IAVF_ERR_MASTER_REQUESTS_PENDING:
+ return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ return "IAVF_ERR_INVALID_LINK_SETTINGS";
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
+ case IAVF_ERR_RESET_FAILED:
+ return "IAVF_ERR_RESET_FAILED";
+ case IAVF_ERR_SWFW_SYNC:
+ return "IAVF_ERR_SWFW_SYNC";
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ return "IAVF_ERR_NO_AVAILABLE_VSI";
+ case IAVF_ERR_NO_MEMORY:
+ return "IAVF_ERR_NO_MEMORY";
+ case IAVF_ERR_BAD_PTR:
+ return "IAVF_ERR_BAD_PTR";
+ case IAVF_ERR_RING_FULL:
+ return "IAVF_ERR_RING_FULL";
+ case IAVF_ERR_INVALID_PD_ID:
+ return "IAVF_ERR_INVALID_PD_ID";
+ case IAVF_ERR_INVALID_QP_ID:
+ return "IAVF_ERR_INVALID_QP_ID";
+ case IAVF_ERR_INVALID_CQ_ID:
+ return "IAVF_ERR_INVALID_CQ_ID";
+ case IAVF_ERR_INVALID_CEQ_ID:
+ return "IAVF_ERR_INVALID_CEQ_ID";
+ case IAVF_ERR_INVALID_AEQ_ID:
+ return "IAVF_ERR_INVALID_AEQ_ID";
+ case IAVF_ERR_INVALID_SIZE:
+ return "IAVF_ERR_INVALID_SIZE";
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ return "IAVF_ERR_INVALID_ARP_INDEX";
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ return "IAVF_ERR_INVALID_FPM_FUNC_ID";
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ return "IAVF_ERR_QP_INVALID_MSG_SIZE";
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ return "IAVF_ERR_INVALID_FRAG_COUNT";
+ case IAVF_ERR_QUEUE_EMPTY:
+ return "IAVF_ERR_QUEUE_EMPTY";
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ return "IAVF_ERR_INVALID_ALIGNMENT";
+ case IAVF_ERR_FLUSHED_QUEUE:
+ return "IAVF_ERR_FLUSHED_QUEUE";
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
+ case IAVF_ERR_TIMEOUT:
+ return "IAVF_ERR_TIMEOUT";
+ case IAVF_ERR_OPCODE_MISMATCH:
+ return "IAVF_ERR_OPCODE_MISMATCH";
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ return "IAVF_ERR_CQP_COMPL_ERROR";
+ case IAVF_ERR_INVALID_VF_ID:
+ return "IAVF_ERR_INVALID_VF_ID";
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ return "IAVF_ERR_INVALID_HMCFN_ID";
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ return "IAVF_ERR_BACKING_PAGE_ERROR";
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ return "IAVF_ERR_INVALID_PBLE_INDEX";
+ case IAVF_ERR_INVALID_SD_INDEX:
+ return "IAVF_ERR_INVALID_SD_INDEX";
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case IAVF_ERR_INVALID_SD_TYPE:
+ return "IAVF_ERR_INVALID_SD_TYPE";
+ case IAVF_ERR_MEMCPY_FAILED:
+ return "IAVF_ERR_MEMCPY_FAILED";
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case IAVF_ERR_SRQ_ENABLED:
+ return "IAVF_ERR_SRQ_ENABLED";
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_ERROR";
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return "IAVF_ERR_BUF_TOO_SHORT";
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ return "IAVF_ERR_ADMIN_QUEUE_FULL";
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case IAVF_ERR_BAD_IWARP_CQE:
+ return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_NVM_BLANK_MODE:
+ return "IAVF_ERR_NVM_BLANK_MODE";
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ return "IAVF_ERR_NOT_IMPLEMENTED";
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ return "IAVF_ERR_DIAG_TEST_FAILED";
+ case IAVF_ERR_NOT_READY:
+ return "IAVF_ERR_NOT_READY";
+ case IAVF_NOT_SUPPORTED:
+ return "IAVF_NOT_SUPPORTED";
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ return "IAVF_ERR_FIRMWARE_API_VERSION";
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -260,7 +260,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err)
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
if ((!(mask & hw->debug_mask)) || !desc)
@@ -327,17 +327,17 @@ bool iavf_check_asq_alive(struct iavf_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_queue_shutdown *cmd =
+ (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ enum iavf_status status;
- iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING);
status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
@@ -354,43 +354,43 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
*
* Internal function to get or set the RSS lookup table
**/
-static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- iavf_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_lut *cmd_resp =
+ (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_lut);
+ iavf_aqc_opc_set_rss_lut);
else
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_lut);
+ iavf_aqc_opc_get_rss_lut);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
cmd_resp->vsi_id =
cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
if (pf_lut)
cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
else
cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
@@ -407,8 +407,8 @@ static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -424,8 +424,8 @@ iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -439,33 +439,33 @@ iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static
+static enum
iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
+ struct iavf_aqc_get_set_rss_key_data *key,
bool set)
{
- iavf_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
- u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_key *cmd_resp =
+ (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_key);
+ iavf_aqc_opc_set_rss_key);
else
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_key);
+ iavf_aqc_opc_get_rss_key);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
cmd_resp->vsi_id =
cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -479,8 +479,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* @key: pointer to key info struct
*
**/
-iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
{
return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -493,8 +493,8 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
*
* set the RSS key per VSI
**/
-iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
{
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
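Both the LUT and key paths above follow the same pattern: a single static helper builds the descriptor and a bool selects the get or set opcode, so the public wrappers stay one-liners. A hedged usage sketch — adapter and vsi_id are assumed from the caller's context, and the buffer size is a placeholder (real callers size the LUT per VSI):

u8 lut[64];			/* placeholder size */
enum iavf_status st;

st = iavf_aq_set_rss_lut(&adapter->hw, vsi_id,
			 false /* per-VSI table, not PF-wide */,
			 lut, sizeof(lut));
if (st)
	dev_err(&adapter->pdev->dev, "RSS LUT write failed: %s\n",
		iavf_stat_str(&adapter->hw, st));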
@@ -515,7 +515,7 @@ iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* IF NOT iavf_ptype_lookup[ptype].known
* THEN
* Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
* Use the rest of the fields to look at the tunnels, inner protocols, etc
* ELSE
* Use the enum iavf_rx_l2_ptype to decode the packet type
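The decision tree in that comment maps directly onto code. A minimal caller sketch, assuming only the names used in this file (ptype comes from the Rx descriptor; the branch bodies are illustrative, not the driver's actual handling):

struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

if (!decoded.known) {
	/* hardware did not recognize the packet type */
} else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) {
	/* IP frame: consult the tunnel and inner-protocol fields */
} else {
	/* non-IP frame: the L2 ptype field identifies it */
}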
@@ -877,24 +877,25 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
* is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
* completion before returning.
**/
-iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
- enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details)
{
- struct i40e_asq_cmd_details details;
- struct i40e_aq_desc desc;
- iavf_status status;
+ struct iavf_asq_cmd_details details;
+ struct iavf_aq_desc desc;
+ enum iavf_status status;
- iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
- | I40E_AQ_FLAG_RD));
- if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
+ | IAVF_AQ_FLAG_RD));
+ if (msglen > IAVF_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
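Since the call only posts the descriptor, a typical caller treats it as fire-and-forget and watches the ARQ for the PF's reply. A hedged sketch (hw comes from the caller; the opcode and empty payload are placeholders):

enum iavf_status status;

status = iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
				IAVF_SUCCESS, NULL, 0, NULL);
if (status)
	hw_dbg(hw, "send to PF failed: %s\n", iavf_stat_str(hw, status));
/* the reply surfaces later via iavf_clean_arq_element() */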
@@ -948,7 +949,7 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
-iavf_status iavf_vf_reset(struct iavf_hw *hw)
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
{
return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
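Per the comment above, the reset message is one-way. A sketch of the documented sequence, using the adminq helpers declared in iavf_prototype.h:

iavf_vf_reset(hw);		/* ask the PF to reset this VF */
iavf_shutdown_adminq(hw);	/* no reply will arrive; close the AQ */
/* ... once the reset settles, iavf_init_adminq(hw) starts over ... */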
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 9f87304109fe..5bdcd78f216d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -280,10 +280,10 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
cmd->base.speed = SPEED_40000;
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
#ifdef SPEED_25000
cmd->base.speed = SPEED_25000;
#else
@@ -291,16 +291,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
cmd->base.speed = SPEED_20000;
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
cmd->base.speed = SPEED_10000;
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
cmd->base.speed = SPEED_1000;
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
cmd->base.speed = SPEED_100;
break;
default:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4569d69a2b55..44d2150adb37 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -66,14 +66,14 @@ static struct workqueue_struct *iavf_wq;
* @size: size of memory requested
* @alignment: what to align the allocation to
**/
-iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
- struct iavf_dma_mem *mem,
- u64 size, u32 alignment)
+enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ u64 size, u32 alignment)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
mem->size = ALIGN(size, alignment);
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
@@ -81,7 +81,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
if (mem->va)
return 0;
else
- return I40E_ERR_NO_MEMORY;
+ return IAVF_ERR_NO_MEMORY;
}
/**
@@ -89,12 +89,13 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
+enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem || !mem->va)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
dma_free_coherent(&adapter->pdev->dev, mem->size,
mem->va, (dma_addr_t)mem->pa);
return 0;
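These _d wrappers let the hardware-agnostic adminq code obtain coherent DMA memory through the adapter back-pointer without knowing about struct device. A hedged sketch of the contract (the 4 KiB size and alignment are placeholders):

struct iavf_dma_mem mem = {};

if (iavf_allocate_dma_mem_d(hw, &mem, 4096, 4096))
	return IAVF_ERR_NO_MEMORY;
/* mem.va is the CPU mapping, mem.pa the bus address handed to HW */
iavf_free_dma_mem_d(hw, &mem);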
@@ -106,11 +107,11 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
- struct iavf_virt_mem *mem, u32 size)
+enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size)
{
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
mem->size = size;
mem->va = kzalloc(size, GFP_KERNEL);
@@ -118,7 +119,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
if (mem->va)
return 0;
else
- return I40E_ERR_NO_MEMORY;
+ return IAVF_ERR_NO_MEMORY;
}
/**
@@ -126,10 +127,11 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
+enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem)
{
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
/* it's ok to kfree a NULL pointer */
kfree(mem->va);
@@ -1227,8 +1229,8 @@ out:
**/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
+ struct iavf_aqc_get_set_rss_key_data *rss_key =
+ (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
int ret = 0;
@@ -2020,9 +2022,9 @@ static void iavf_adminq_task(struct work_struct *work)
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
- iavf_status ret, v_ret;
+ enum iavf_status ret, v_ret;
u32 val, oldval;
u16 pending;
@@ -2037,7 +2039,7 @@ static void iavf_adminq_task(struct work_struct *work)
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */
@@ -2239,22 +2241,22 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
int speed = 0, ret = 0;
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
speed = 40000;
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
speed = 25000;
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
speed = 20000;
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
speed = 10000;
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
speed = 1000;
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
speed = 100;
break;
default:
@@ -2508,7 +2510,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2518,7 +2520,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2553,7 +2555,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2577,7 +2579,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2587,13 +2589,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -2614,7 +2616,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -2624,7 +2626,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -2649,7 +2651,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2659,7 +2661,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
if (match.key->dst) {
@@ -3353,7 +3355,7 @@ static void iavf_init_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- int err, bufsz;
+ int err;
switch (adapter->state) {
case __IAVF_STARTUP:
@@ -3402,7 +3404,7 @@ static void iavf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -3423,18 +3425,17 @@ static void iavf_init_task(struct work_struct *work)
case __IAVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
- bufsz = sizeof(struct virtchnl_vf_resource) +
- (IAVF_MAX_VF_VSI *
- sizeof(struct virtchnl_vsi_resource));
- adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ adapter->vf_res = kzalloc(struct_size(adapter->vf_res,
+ vsi_res, IAVF_MAX_VF_VSI),
+ GFP_KERNEL);
if (!adapter->vf_res)
goto err;
}
err = iavf_get_vf_config(adapter);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
err = iavf_send_vf_config_msg(adapter);
goto err;
- } else if (err == I40E_ERR_PARAM) {
+ } else if (err == IAVF_ERR_PARAM) {
/* We only get ERR_PARAM if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
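The struct_size() conversion above mirrors the earlier qvlist change, but note the count is passed unreduced: the old open-coded expression added IAVF_MAX_VF_VSI whole elements on top of sizeof(struct virtchnl_vf_resource), so struct_size(adapter->vf_res, vsi_res, IAVF_MAX_VF_VSI) reproduces the same byte count. Shown open-coded for comparison only:

size_t bufsz = sizeof(struct virtchnl_vf_resource) +
	       IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);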
diff --git a/drivers/net/ethernet/intel/iavf/iavf_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
index e6e0b0328706..d39684558597 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_osdep.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
@@ -46,7 +46,6 @@ struct iavf_virt_mem {
#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
- __attribute__ ((format(gnu_printf, 3, 4)));
+ __printf(3, 4);
-typedef enum iavf_status_code iavf_status;
#endif /* _IAVF_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index d6685103af39..edebfbbcffdc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -16,39 +16,40 @@
*/
/* adminq functions */
-iavf_status iavf_init_adminq(struct iavf_hw *hw);
-iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
-void i40e_adminq_init_ring_data(struct iavf_hw *hw);
-iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+void iavf_adminq_init_ring_data(struct iavf_hw *hw);
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *events_pending);
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details);
bool iavf_asq_done(struct iavf_hw *hw);
/* debug function for adminq */
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void i40e_idle_aq(struct iavf_hw *hw);
+void iavf_idle_aq(struct iavf_hw *hw);
void iavf_resume_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
-iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
-const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err);
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
-iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
-iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
@@ -59,9 +60,10 @@ static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
-iavf_status iavf_vf_reset(struct iavf_hw *hw);
-iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
- enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details);
#endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46742fab7b8c..46e3d1f6b604 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -5,74 +5,74 @@
#define _IAVF_STATUS_H_
/* Error Codes */
-enum iavf_status_code {
- I40E_SUCCESS = 0,
- I40E_ERR_NVM = -1,
- I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
- I40E_ERR_CONFIG = -4,
- I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
- I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
- I40E_ERR_INVALID_MAC_ADDR = -10,
- I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_MASTER_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
- I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
- I40E_ERR_NO_AVAILABLE_VSI = -17,
- I40E_ERR_NO_MEMORY = -18,
- I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
- I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
- I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
- I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
- I40E_ERR_INVALID_SD_INDEX = -45,
- I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
- I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
- I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
- I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
- I40E_ERR_ADMIN_QUEUE_ERROR = -53,
- I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
- I40E_ERR_BUF_TOO_SHORT = -55,
- I40E_ERR_ADMIN_QUEUE_FULL = -56,
- I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
- I40E_ERR_NVM_BLANK_MODE = -59,
- I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
- I40E_ERR_DIAG_TEST_FAILED = -62,
- I40E_ERR_NOT_READY = -63,
- I40E_NOT_SUPPORTED = -64,
- I40E_ERR_FIRMWARE_API_VERSION = -65,
- I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+enum iavf_status {
+ IAVF_SUCCESS = 0,
+ IAVF_ERR_NVM = -1,
+ IAVF_ERR_NVM_CHECKSUM = -2,
+ IAVF_ERR_PHY = -3,
+ IAVF_ERR_CONFIG = -4,
+ IAVF_ERR_PARAM = -5,
+ IAVF_ERR_MAC_TYPE = -6,
+ IAVF_ERR_UNKNOWN_PHY = -7,
+ IAVF_ERR_LINK_SETUP = -8,
+ IAVF_ERR_ADAPTER_STOPPED = -9,
+ IAVF_ERR_INVALID_MAC_ADDR = -10,
+ IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_INVALID_LINK_SETTINGS = -13,
+ IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ IAVF_ERR_RESET_FAILED = -15,
+ IAVF_ERR_SWFW_SYNC = -16,
+ IAVF_ERR_NO_AVAILABLE_VSI = -17,
+ IAVF_ERR_NO_MEMORY = -18,
+ IAVF_ERR_BAD_PTR = -19,
+ IAVF_ERR_RING_FULL = -20,
+ IAVF_ERR_INVALID_PD_ID = -21,
+ IAVF_ERR_INVALID_QP_ID = -22,
+ IAVF_ERR_INVALID_CQ_ID = -23,
+ IAVF_ERR_INVALID_CEQ_ID = -24,
+ IAVF_ERR_INVALID_AEQ_ID = -25,
+ IAVF_ERR_INVALID_SIZE = -26,
+ IAVF_ERR_INVALID_ARP_INDEX = -27,
+ IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ IAVF_ERR_INVALID_FRAG_COUNT = -31,
+ IAVF_ERR_QUEUE_EMPTY = -32,
+ IAVF_ERR_INVALID_ALIGNMENT = -33,
+ IAVF_ERR_FLUSHED_QUEUE = -34,
+ IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ IAVF_ERR_TIMEOUT = -37,
+ IAVF_ERR_OPCODE_MISMATCH = -38,
+ IAVF_ERR_CQP_COMPL_ERROR = -39,
+ IAVF_ERR_INVALID_VF_ID = -40,
+ IAVF_ERR_INVALID_HMCFN_ID = -41,
+ IAVF_ERR_BACKING_PAGE_ERROR = -42,
+ IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ IAVF_ERR_INVALID_PBLE_INDEX = -44,
+ IAVF_ERR_INVALID_SD_INDEX = -45,
+ IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ IAVF_ERR_INVALID_SD_TYPE = -47,
+ IAVF_ERR_MEMCPY_FAILED = -48,
+ IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ IAVF_ERR_SRQ_ENABLED = -52,
+ IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ IAVF_ERR_BUF_TOO_SHORT = -55,
+ IAVF_ERR_ADMIN_QUEUE_FULL = -56,
+ IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_NVM_BLANK_MODE = -59,
+ IAVF_ERR_NOT_IMPLEMENTED = -60,
+ IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ IAVF_ERR_DIAG_TEST_FAILED = -62,
+ IAVF_ERR_NOT_READY = -63,
+ IAVF_NOT_SUPPORTED = -64,
+ IAVF_ERR_FIRMWARE_API_VERSION = -65,
+ IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _IAVF_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 1474f5539751..1058e68a02b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -17,8 +17,8 @@
/* See trace-events-sample.h for a detailed description of why this
* guard clause is different from most normal include files.
*/
-#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
-#define _I40E_TRACE_H_
+#if !defined(_IAVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _IAVF_TRACE_H_
#include <linux/tracepoint.h>
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d1509d57f7..6d43cbe29c49 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -379,19 +379,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
unsigned int divisor;
switch (q_vector->adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break;
- case I40E_LINK_SPEED_25GB:
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_20GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break;
default:
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break;
- case I40E_LINK_SPEED_1GB:
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_100MB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break;
}
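For reference, this hunk is a pure rename; the numeric behavior is unchanged. The divisor still scales the adaptive-ITR estimate roughly with line rate: 40G uses IAVF_ITR_ADAPTIVE_MIN_INC * 1024, 25G/20G use * 512, 10G and unknown speeds use * 256, and 1G/100M use * 32, keeping the bytes-per-ITR-step heuristic comparable across link speeds.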
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index ca89583613fb..7190a40c540c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -7,7 +7,7 @@
#include "iavf_status.h"
#include "iavf_osdep.h"
#include "iavf_register.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_devids.h"
#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
@@ -21,7 +21,7 @@
/* forward declaration */
struct iavf_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *);
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
/* Data type manipulation macros. */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index e64751da0921..3eea35cee25a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -22,7 +22,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- iavf_status err;
+ enum iavf_status err;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
@@ -41,7 +41,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
*
* Send API version admin queue message to the PF. The reply is not checked
* in this function. Returns 0 if the message was successfully
- * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
@@ -60,16 +60,16 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
*
* Compare API versions with the PF. Must be called after admin queue is
* initialized. Returns 0 if API versions match, -EIO if they do not,
- * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
+ * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
* from the firmware are propagated.
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
struct virtchnl_version_info *pf_vvi;
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops op;
- iavf_status err;
+ enum iavf_status err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -92,7 +92,7 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter)
}
- err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
@@ -123,7 +123,7 @@ out:
*
* Send VF configuration request admin queue message to the PF. The reply
* is not checked in this function. Returns 0 if the message was
- * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/
int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
@@ -189,9 +189,9 @@ static void iavf_validate_num_queues(struct iavf_adapter *adapter)
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops op;
- iavf_status err;
+ enum iavf_status err;
u16 len;
len = sizeof(struct virtchnl_vf_resource) +
@@ -216,7 +216,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
break;
}
- err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -416,7 +416,7 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num)
return -EBUSY;
}
- vfres.num_queue_pairs = num;
+ vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
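The functional change in this hunk is the clamp: the VF no longer forwards an arbitrary queue-pair request to the PF. Illustratively (values assumed), on an 8-CPU host:

/* num = 16, num_online_cpus() = 8  =>  num_queue_pairs = 8 */
vfres.num_queue_pairs = min_t(int, num, num_online_cpus());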
@@ -938,22 +938,22 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
}
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
speed = "40 G";
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
speed = "25 G";
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
speed = "20 G";
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
speed = "10 G";
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
speed = "1000 M";
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
speed = "100 M";
break;
default:
@@ -1184,8 +1184,8 @@ void iavf_request_reset(struct iavf_adapter *adapter)
* This function handles the reply messages.
**/
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
- enum virtchnl_ops v_opcode, iavf_status v_retval,
- u8 *msg, u16 msglen)
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval, u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 792e6e42030e..9ee6b55553c0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -44,15 +44,22 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
-#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
+#define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160
-/* set default number of Rx/Tx descriptors to the minimum between
- * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+#define ICE_DFLT_MIN_RX_DESC 512
+/* If the number of Rx descriptors that fit in a page (rounded up to
+ * ICE_REQ_DESC_MULTIPLE and capped at ICE_MAX_NUM_DESC) is at least
+ * ICE_DFLT_MIN_RX_DESC, use that page-based value; otherwise fall back to
+ * ICE_DFLT_MIN_RX_DESC.
+ */
+#define ICE_DFLT_NUM_RX_DESC \
+ min_t(u16, ICE_MAX_NUM_DESC, \
+ max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE), \
+ ICE_DFLT_MIN_RX_DESC))
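Worked example (assuming 4 KiB pages and the 32-byte Rx descriptor): 4096 / 32 = 128 descriptors per page; ALIGN(128, ICE_REQ_DESC_MULTIPLE) = 128; max(128, 512) = 512; min(512, ICE_MAX_NUM_DESC) = 512. The ICE_DFLT_MIN_RX_DESC floor therefore wins on 4 KiB-page systems, while a 64 KiB-page architecture gets 65536 / 32 = 2048 descriptors.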
+/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC
+ * and the number of descriptors to fill up an entire page
*/
-#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
- ALIGN(PAGE_SIZE / \
- sizeof(union ice_32byte_rx_desc), \
- ICE_REQ_DESC_MULTIPLE))
#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
ALIGN(PAGE_SIZE / \
sizeof(struct ice_tx_desc), \
@@ -160,7 +167,7 @@ struct ice_tc_cfg {
struct ice_res_tracker {
u16 num_entries;
- u16 search_hint;
+ u16 end;
u16 list[1];
};
@@ -182,6 +189,7 @@ struct ice_sw {
};
enum ice_state {
+ __ICE_TESTING,
__ICE_DOWN,
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
@@ -244,8 +252,7 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
- int sw_base_vector; /* Irq base for OS reserved vectors */
- int hw_base_vector; /* HW (absolute) index of a vector */
+ int base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -277,10 +284,10 @@ struct ice_vsi {
struct list_head tmp_sync_list; /* MAC filters to be synced */
struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
- u8 irqs_ready;
- u8 current_isup; /* Sync 'link up' logging */
- u8 stat_offsets_loaded;
- u8 vlan_ena;
+ u8 irqs_ready:1;
+ u8 current_isup:1; /* Sync 'link up' logging */
+ u8 stat_offsets_loaded:1;
+ u8 vlan_ena:1;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -330,7 +337,7 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ENABLE_FW_LLDP,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -340,10 +347,12 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *sw_irq_tracker;
-
- /* HW reserved Interrupts for this PF */
- struct ice_res_tracker *hw_irq_tracker;
+ struct ice_res_tracker *irq_tracker;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u16 sriov_base_vector;
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
@@ -365,10 +374,8 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 sw_oicr_idx; /* Other interrupt cause SW vector index */
+ u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
- u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
- u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
@@ -384,7 +391,7 @@ struct ice_pf {
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
- u8 stat_prev_loaded; /* has previous stats been loaded */
+ u8 stat_prev_loaded:1; /* has previous stats been loaded */
#ifdef CONFIG_DCB
u16 dcbx_cap;
#endif /* CONFIG_DCB */
@@ -392,6 +399,7 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ u32 sw_int_count;
};
struct ice_netdev_priv {
@@ -409,7 +417,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
- ((struct ice_pf *)hw->back)->hw_oicr_idx;
+ ((struct ice_pf *)hw->back)->oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -444,17 +452,22 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
return NULL;
}
+int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
+int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-void ice_napi_del(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */
+int ice_open(struct net_device *netdev);
+int ice_stop(struct net_device *netdev);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6ef083002f5b..765e3c2ed045 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -35,8 +35,8 @@ struct ice_aqc_get_ver {
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
-#define ICE_AQC_DRIVER_UNLOADING BIT(0)
__le32 driver_unloading;
+#define ICE_AQC_DRIVER_UNLOADING BIT(0)
u8 reserved[12];
};
@@ -120,11 +120,9 @@ struct ice_aqc_manage_mac_read {
#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
#define ICE_AQC_MAN_MAC_READ_S 4
#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
- u8 lport_num;
- u8 lport_num_valid;
-#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0)
+ u8 rsvd[2];
u8 num_addr; /* Used in response */
- u8 reserved[3];
+ u8 rsvd1[3];
__le32 addr_high;
__le32 addr_low;
};
@@ -140,7 +138,7 @@ struct ice_aqc_manage_mac_read_resp {
/* Manage MAC address, write command - direct (0x0108) */
struct ice_aqc_manage_mac_write {
- u8 port_num;
+ u8 rsvd;
u8 flags;
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
@@ -920,6 +918,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EN_LINK BIT(3)
#define ICE_AQC_PHY_AN_MODE BIT(4)
#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
+#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
+#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0)
u8 low_power_ctrl;
#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
__le16 eee_cap;
@@ -932,6 +932,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
__le16 eeer_value;
u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
u8 link_fec_options;
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
@@ -940,6 +941,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0)
+ u8 rsvd1; /* Byte 35 reserved */
u8 extended_compliance_code;
#define ICE_MODULE_TYPE_TOTAL_BYTE 3
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
@@ -954,13 +957,14 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
#define ICE_AQC_QUAL_MOD_COUNT_MAX 16
struct {
u8 v_oui[3];
- u8 rsvd1;
+ u8 rsvd3;
u8 v_part[16];
__le32 v_rev;
- __le64 rsvd8;
+ __le64 rsvd4;
} qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
};
@@ -1062,6 +1066,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
+#define ICE_AQ_FEC_MASK ICE_M(0x7, 0)
/* Pacing Config */
#define ICE_AQ_CFG_PACING_S 3
#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
@@ -1112,6 +1117,14 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
+/* Set MAC Loopback command (direct 0x0620) */
+struct ice_aqc_set_mac_lb {
+ u8 lb_mode;
+#define ICE_AQ_MAC_LB_EN BIT(0)
+#define ICE_AQ_MAC_LB_OSC_CLK BIT(1)
+ u8 reserved[15];
+};
+
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1145,6 +1158,17 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/* NVM Checksum Command (direct, 0x0706) */
+struct ice_aqc_nvm_checksum {
+ u8 flags;
+#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0)
+#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
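A minimal sketch of how this descriptor might be driven, in the spirit of the ice_nvm_validate_checksum() declared later in the series; the real body is outside this excerpt, and ICE_ERR_NVM_CHECKSUM plus the surrounding NVM ownership handling are assumed:

static enum ice_status ice_nvm_checksum_verify_sketch(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.nvm_checksum;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	/* desc (and thus cmd->checksum) is written back on completion */
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status &&
	    le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
		status = ICE_ERR_NVM_CHECKSUM;

	return status;
}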
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1249,7 +1273,7 @@ struct ice_aqc_get_cee_dcb_cfg_resp {
};
/* Set Local LLDP MIB (indirect 0x0A08)
- * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBX
*/
struct ice_aqc_lldp_set_local_mib {
u8 type;
@@ -1266,7 +1290,7 @@ struct ice_aqc_lldp_set_local_mib {
};
/* Stop/Start LLDP Agent (direct 0x0A09)
- * Used for stopping/starting specific LLDP agent. e.g. DCBx.
+ * Used for stopping/starting specific LLDP agent. e.g. DCBX.
* The same structure is used for the response, with the command field
* being used as the status field.
*/
@@ -1539,6 +1563,7 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
@@ -1554,6 +1579,7 @@ struct ice_aq_desc {
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
+ struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
@@ -1642,10 +1668,12 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_checksum = 0x0706,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@@ -1671,6 +1699,7 @@ enum ice_adminq_opc {
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
+ ice_aqc_opc_fw_logging_info = 0xFF10,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index da7878529929..2e0731c1e1a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -51,9 +51,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
*/
void ice_dev_onetime_setup(struct ice_hw *hw)
{
- /* configure Rx - set non pxe mode */
- wr32(hw, GLLAN_RCTL_0, 0x1);
-
#define MBX_PF_VT_PFALLOC 0x00231E80
/* set VFs per PF */
wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
@@ -307,6 +304,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
hw_link_info->an_info = link_data.an_info;
hw_link_info->ext_info = link_data.ext_info;
hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
/* update fc info */
@@ -476,6 +475,49 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
/**
+ * ice_get_fw_log_cfg - get FW logging configuration
+ * @hw: pointer to the HW struct
+ */
+static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+{
+ struct ice_aqc_fw_logging_data *config;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 size;
+
+ size = ICE_FW_LOG_DESC_SIZE_MAX;
+ config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
+ if (!config)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
+ if (!status) {
+ u16 i;
+
+ /* Save FW logging information into the HW structure */
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ u16 v, m, flgs;
+
+ v = le16_to_cpu(config->entry[i]);
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
+ flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
+
+ if (m < ICE_AQC_FW_LOG_ID_MAX)
+ hw->fw_log.evnts[m].cur = flgs;
+ }
+ }
+
+ devm_kfree(ice_hw_to_dev(hw), config);
+
+ return status;
+}
+
+/**
* ice_cfg_fw_log - configure FW logging
* @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
@@ -529,6 +571,11 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
(!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
return 0;
+ /* Get current FW log settings */
+ status = ice_get_fw_log_cfg(hw);
+ if (status)
+ return status;
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
cmd = &desc.params.fw_logging;
@@ -634,17 +681,17 @@ out:
*/
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
- ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
+ ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
* @hw: pointer to the HW struct
*
- * Determines the itr/intrl granularities based on the maximum aggregate
+ * Determines the ITR/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -815,6 +862,10 @@ err_unroll_cqinit:
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
+ *
+ * This should be called only during nominal operation, not as a result of
+ * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
+ * applicable initializations if it fails for any reason.
*/
void ice_deinit_hw(struct ice_hw *hw)
{
@@ -1447,6 +1498,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
struct ice_hw_func_caps *func_p = NULL;
struct ice_hw_dev_caps *dev_p = NULL;
struct ice_hw_common_caps *caps;
+ const char *prefix;
u32 i;
if (!buf)
@@ -1457,9 +1509,11 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
if (opc == ice_aqc_opc_list_dev_caps) {
dev_p = &hw->dev_caps;
caps = &dev_p->common_cap;
+ prefix = "dev cap";
} else if (opc == ice_aqc_opc_list_func_caps) {
func_p = &hw->func_caps;
caps = &func_p->common_cap;
+ prefix = "func cap";
} else {
ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
return;
@@ -1475,28 +1529,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Valid Functions = %d\n",
+ "%s: valid functions = %d\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ "%s: SR-IOV = %d\n", prefix,
+ caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VF:
if (dev_p) {
dev_p->num_vfs_exposed = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VFs exposed = %d\n",
+ "%s: VFs exposed = %d\n", prefix,
dev_p->num_vfs_exposed);
} else if (func_p) {
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VFs allocated = %d\n",
+ "%s: VFs allocated = %d\n", prefix,
func_p->num_allocd_vfs);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VF base_id = %d\n",
+ "%s: VF base_id = %d\n", prefix,
func_p->vf_base_id);
}
break;
@@ -1504,69 +1559,69 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Dev.VSI cnt = %d\n",
+ "%s: num VSI alloc to host = %d\n",
+ prefix,
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
func_p->guar_num_vsi =
ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Func.VSI cnt = %d\n",
- number);
+ "%s: num guaranteed VSI (fw) = %d\n",
+ prefix, number);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num guaranteed VSI = %d\n",
+ prefix, func_p->guar_num_vsi);
}
break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: RSS table size = %d\n",
+ "%s: RSS table size = %d\n", prefix,
caps->rss_table_size);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: RSS table width = %d\n",
+ "%s: RSS table width = %d\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+ "%s: num Rx queues = %d\n", prefix,
+ caps->num_rxq);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Rx first queue ID = %d\n",
+ "%s: Rx first queue ID = %d\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+ "%s: num Tx queues = %d\n", prefix,
+ caps->num_txq);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Tx first queue ID = %d\n",
+ "%s: Tx first queue ID = %d\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: MSIX vector count = %d\n",
+ "%s: MSIX vector count = %d\n", prefix,
caps->num_msix_vectors);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: MSIX first vector index = %d\n",
+ "%s: MSIX first vector index = %d\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
- if (dev_p)
- ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Dev.MaxMTU = %d\n",
- caps->max_mtu);
- else if (func_p)
- ice_debug(hw, ICE_DBG_INIT,
- "HW caps: func.MaxMTU = %d\n",
- caps->max_mtu);
+ ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
+ prefix, caps->max_mtu);
break;
default:
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Unknown capability[%d]: 0x%x\n", i,
- cap);
+ "%s: unknown capability[%d]: 0x%x\n", prefix,
+ i, cap);
break;
}
}
@@ -1947,36 +2002,37 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
*/
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_phy_info *phy_info;
+ struct ice_link_status *li;
enum ice_status status;
- struct ice_hw *hw;
if (!pi)
return ICE_ERR_PARAM;
- hw = pi->hw;
-
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
- if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ li = &pi->phy.link_info;
- phy_info = &pi->phy;
status = ice_aq_get_link_info(pi, true, NULL, NULL);
if (status)
- goto out;
+ return status;
+
+ if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_hw *hw;
+
+ hw = pi->hw;
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
+ GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
- if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
pcaps, NULL);
- if (status)
- goto out;
+ if (!status)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
- memcpy(phy_info->link_info.module_type, &pcaps->module_type,
- sizeof(phy_info->link_info.module_type));
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
}
-out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
+
return status;
}
@@ -2081,6 +2137,74 @@ out:
}
/**
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
+ * data structure
+ */
+void
+ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl = caps->low_power_ctrl;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+}
+
+/**
+ * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @cfg: PHY configuration data to set FEC mode
+ * @fec: FEC mode to configure
+ *
+ * Before calling, the caller should copy ICE_AQC_PHY_EN_AUTO_FEC (bit 7) of
+ * ice_aqc_get_phy_caps_data.caps into cfg.caps (ICE_AQ_PHY_ENA_AUTO_FEC), and
+ * ice_aqc_get_phy_caps_data.link_fec_options into cfg.link_fec_opt.
+ */
+void
+ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
+{
+ switch (fec) {
+ case ICE_FEC_BASER:
+ /* Clear auto FEC and RS bits, and AND BASE-R ability
+ * bits and OR request bits.
+ */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
+ break;
+ case ICE_FEC_RS:
+ /* Clear auto FEC and BASE-R bits, and AND RS ability
+ * bits and OR request bits.
+ */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ break;
+ case ICE_FEC_NONE:
+ /* Clear auto FEC and all FEC option bits. */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
+ break;
+ case ICE_FEC_AUTO:
+ /* AND auto FEC bit, and all caps bits. */
+ cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ break;
+ }
+}
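A hedged usage sketch tying the two helpers above together in the order the kernel-doc prescribes; pi->lport and the stack-allocated AQ buffers are simplifications for illustration, not the series' actual caller:

static enum ice_status
ice_set_fec_sketch(struct ice_port_info *pi, enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	enum ice_status status;

	/* read current abilities, seed the config, then mask in FEC mode */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     &pcaps, NULL);
	if (status)
		return status;

	ice_copy_phy_caps_to_cfg(&pcaps, &cfg);
	ice_cfg_phy_fec(&cfg, fec);

	return ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
}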
+
+/**
* ice_get_link_status - get status of the HW network link
* @pi: port information structure
* @link_up: pointer to bool (true/false = linkup/linkdown)
@@ -2169,6 +2293,29 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
}
/**
+ * ice_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_lb *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_lb;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
+ if (ena_lpbk)
+ cmd->lb_mode = ICE_AQ_MAC_LB_EN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
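An illustrative caller, e.g. wrapped around an offline loopback self-test; the command is stateful in the MAC, so the disable call should run even when the test itself fails (error handling condensed here):

static u64 ice_mac_loopback_probe_sketch(struct ice_pf *pf)
{
	u64 ret = 0;

	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL))
		return 1;
	/* ... transmit a test frame and poll for its looped-back copy ... */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		ret = 1;
	return ret;
}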
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
@@ -2552,7 +2699,7 @@ do_aq:
ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
vmvf_num, hw->adminq.sq_last_status);
else
- ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
le16_to_cpu(qg_list[0].q_id[0]),
hw->adminq.sq_last_status);
}
@@ -2924,7 +3071,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
-
if (!num_queues) {
/* if queue is disabled already yet the disable queue command
* has to be sent to complete the VF reset, then call
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index f1ddebf45231..d1f8353fe6bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,6 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
@@ -84,7 +86,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-
+void
+ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
+void
+ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
@@ -95,6 +101,9 @@ enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
+
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index cc8cb5fdcdc1..e91ac4df0242 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -439,7 +439,7 @@ do { \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free dma head */ \
+ /* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index e0585394d984..44945c2165d8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -35,7 +35,7 @@ enum ice_ctl_q {
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
struct ice_ctl_q_ring {
- void *dma_head; /* Virtual address to dma head */
+ void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 8bbf48e04a1c..c2002ded65f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -82,12 +82,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
* @hw: pointer to the HW struct
* @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
* False if LLDP Agent needs to be Stopped
+ * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across
+ * reboots
* @cd: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
@@ -100,17 +102,22 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
if (shutdown_lldp_agent)
cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_start_lldp
* @hw: pointer to the HW struct
+ * @persist: True if Start of LLDP Agent needs to be persistent across reboots
* @cd: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -121,6 +128,9 @@ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
cmd->command = ICE_AQ_LLDP_AGENT_START;
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
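Usage note (illustrative): a caller re-enabling FW LLDP, e.g. from a private-flags handler, would pass persist = true (ice_aq_start_lldp(&pf->hw, true, NULL)) so the agent stays enabled across reboots via ICE_AQ_LLDP_AGENT_PERSIST_ENA, mirroring the persist handling added to ice_aq_stop_lldp() above.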
@@ -163,7 +173,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
*
* Get the DCBX status from the Firmware
*/
-u8 ice_get_dcbx_status(struct ice_hw *hw)
+static u8 ice_get_dcbx_status(struct ice_hw *hw)
{
u32 reg;
@@ -614,7 +624,8 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static enum ice_status
+ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
enum ice_status ret = 0;
@@ -658,13 +669,13 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
/**
* ice_aq_get_dcb_cfg
* @hw: pointer to the HW struct
- * @mib_type: mib type for the query
+ * @mib_type: MIB type for the query
* @bridgetype: bridge type for the query (remote)
* @dcbcfg: store for LLDPDU data
*
* Query DCB configuration from the firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
@@ -689,13 +700,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
}
/**
- * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
* @hw: pointer to the HW struct
- * @start_dcbx_agent: True if DCBx Agent needs to be started
- * False if DCBx Agent needs to be stopped
- * @dcbx_agent_status: FW indicates back the DCBx agent status
- * True if DCBx Agent is active
- * False if DCBx Agent is stopped
+ * @start_dcbx_agent: True if DCBX Agent needs to be started
+ * False if DCBX Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBX agent status
+ * True if DCBX Agent is active
+ * False if DCBX Agent is stopped
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded DCBX Agent. In case this wrapper function
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index e7d4416e3a66..522e1452abe2 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -120,8 +120,9 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
-u8 ice_get_dcbx_status(struct ice_hw *hw);
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw);
@@ -131,9 +132,10 @@ ice_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
@@ -144,6 +146,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
static inline enum ice_status
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
+ bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
{
return 0;
@@ -151,6 +154,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
static inline enum ice_status
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+ bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
{
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 3e81af1884fc..fe88b127ca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -120,12 +120,14 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
- if (ret)
+ if (ret) {
dev_err(&pf->pdev->dev,
"Failed to config TC for VSI index: %d\n",
pf->vsi[v]->idx);
- else
- ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ continue;
+ }
+
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
}
}
@@ -133,8 +135,10 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
+ * @locked: is the RTNL held
*/
-static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+static
+int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
@@ -163,7 +167,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
- rtnl_lock();
+ if (!locked)
+ rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
@@ -192,7 +197,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
out:
ice_pf_ena_all_vsi(pf, true);
- rtnl_unlock();
+ if (!locked)
+ rtnl_unlock();
devm_kfree(&pf->pdev->dev, old_cfg);
return ret;
}
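Pattern note: the new locked parameter exists because some callers (e.g. ethtool paths flagged via ICE_FLAG_ETHTOOL_CTXT) already hold the RTNL; re-taking rtnl_lock() there would deadlock, so the disable/enable window above conditionally skips lock acquisition and release for such callers.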
@@ -271,15 +277,16 @@ dcb_error:
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
- ice_pf_dcb_cfg(pf, prev_cfg);
+ ice_pf_dcb_cfg(pf, prev_cfg, false);
devm_kfree(&pf->pdev->dev, prev_cfg);
}
/**
* ice_dcb_init_cfg - set the initial DCB config in SW
- * @pf: pf to apply config to
+ * @pf: PF to apply config to
+ * @locked: Is the RTNL held
*/
-static int ice_dcb_init_cfg(struct ice_pf *pf)
+static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *newcfg;
struct ice_port_info *pi;
@@ -294,7 +301,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
- if (ice_pf_dcb_cfg(pf, newcfg))
+ if (ice_pf_dcb_cfg(pf, newcfg, locked))
ret = -EINVAL;
devm_kfree(&pf->pdev->dev, newcfg);
@@ -304,9 +311,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
/**
* ice_dcb_sw_default_config - Apply a default DCB config
- * @pf: pf to apply config to
+ * @pf: PF to apply config to
+ * @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -338,7 +346,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
dcbcfg->app[0].priority = 3;
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
- ret = ice_pf_dcb_cfg(pf, dcbcfg);
+ ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
devm_kfree(&pf->pdev->dev, dcbcfg);
if (ret)
return ret;
@@ -348,9 +356,10 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
/**
* ice_init_pf_dcb - initialize DCB for a PF
- * @pf: pf to initiialize DCB for
+ * @pf: PF to initialize DCB for
+ * @locked: Was function called with RTNL held
*/
-int ice_init_pf_dcb(struct ice_pf *pf)
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
@@ -360,33 +369,10 @@ int ice_init_pf_dcb(struct ice_pf *pf)
port_info = hw->port_info;
- /* check if device is DCB capable */
- if (!hw->func_caps.common_cap.dcb) {
- dev_dbg(dev, "DCB not supported\n");
- return -EOPNOTSUPP;
- }
-
- /* Best effort to put DCBx and LLDP into a good state */
- port_info->dcbx_status = ice_get_dcbx_status(hw);
- if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
- port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
- bool dcbx_status;
-
- /* Attempt to start LLDP engine. Ignore errors
- * as this will error if it is already started
- */
- ice_aq_start_lldp(hw, NULL);
-
- /* Attempt to start DCBX. Ignore errors as this
- * will error if it is already started
- */
- ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
- }
-
err = ice_init_dcb(hw);
if (err) {
- /* FW LLDP not in usable state, default to SW DCBx/LLDP */
- dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+ /* FW LLDP is not active, default to SW DCBX/LLDP */
+ dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
hw->port_info->is_sw_lldp = true;
}
@@ -398,15 +384,16 @@ int ice_init_pf_dcb(struct ice_pf *pf)
if (port_info->is_sw_lldp) {
sw_default = 1;
dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+ clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
+ } else {
+ set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
}
- if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
- sw_default = 1;
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
dev_info(&pf->pdev->dev, "DCBX not started\n");
- }
if (sw_default) {
- err = ice_dcb_sw_dflt_cfg(pf);
+ err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to set local DCB config %d\n", err);
@@ -425,7 +412,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- err = ice_dcb_init_cfg(pf);
+ err = ice_dcb_init_cfg(pf, locked);
if (err)
goto dcb_init_err;
@@ -515,6 +502,55 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
}
/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prio_table,
+ &old_cfg->etscfg.prio_table,
+ sizeof(new_cfg->etscfg.prio_table))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
@@ -523,29 +559,95 @@ void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
- struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
- int err;
-
- prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
- dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
- sizeof(*dcbcfg), GFP_KERNEL);
- if (!dcbcfg)
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_aqc_lldp_get_mib *mib;
+ struct ice_dcbx_cfg tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ struct ice_port_info *pi;
+ u8 type;
+ int ret;
+
+ /* Not DCB capable or capability disabled */
+ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+ return;
+
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
+ dev_dbg(&pf->pdev->dev,
+ "MIB Change Event in HOST mode\n");
+ return;
+ }
+
+ pi = pf->hw.port_info;
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+ if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+ return;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ dev_dbg(&pf->pdev->dev,
+ "LLDP event mib type %s\n", type ? "remote" : "local");
+ if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+ &pi->remote_dcbx_cfg);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
return;
+ }
+ }
- err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
- if (!err)
- ice_pf_dcb_cfg(pf, dcbcfg);
+ /* store the old configuration */
+ tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
- devm_kfree(&pf->pdev->dev, dcbcfg);
+ /* Reset the old DCBX configuration data */
+ memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
- /* Get updated DCBx data from firmware */
- err = ice_get_dcb_cfg(pf->hw.port_info);
- if (err)
- dev_err(&pf->pdev->dev,
- "Failed to get DCB config\n");
- } else {
+ /* Get updated DCBX data from firmware */
+ ret = ice_get_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+ return;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev,
- "MIB Change Event in HOST mode\n");
+ "No change detected in DCBX configuration.\n");
+ return;
+ }
+
+ need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
+ &pi->local_dcbx_cfg);
+ if (!need_reconfig)
+ return;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+ dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+
+ rtnl_lock();
+ ice_pf_dis_all_vsi(pf, true);
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ rtnl_unlock();
+ return;
+ }
+
+ /* changes in configuration update VSI */
+ ice_pf_dcb_recfg(pf);
+
+ ice_pf_ena_all_vsi(pf, true);
+ rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index ca7b76faa03c..819081053ff5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -14,7 +14,7 @@ void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
-int ice_init_pf_dcb(struct ice_pf *pf);
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
@@ -40,7 +40,8 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
-static inline int ice_init_pf_dcb(struct ice_pf *pf)
+static inline int
+ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
dev_dbg(&pf->pdev->dev, "DCB not supported\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1341fde8d53f..52083a63dee6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -45,22 +45,40 @@ static int ice_q_stats_len(struct net_device *netdev)
ICE_VSI_STATS_LEN + ice_q_stats_len(n))
static const struct ice_stats ice_gstrings_vsi_stats[] = {
- ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
- ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
- ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
- ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+ ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
- ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
- ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
- ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+ ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
+ ICE_VSI_STAT("tx_linearize", tx_linearize),
+};
+
+enum ice_ethtool_test_id {
+ ICE_ETH_TEST_REG = 0,
+ ICE_ETH_TEST_EEPROM,
+ ICE_ETH_TEST_INTR,
+ ICE_ETH_TEST_LOOP,
+ ICE_ETH_TEST_LINK,
};
+static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "EEPROM test (offline)",
+ "Interrupt test (offline)",
+ "Loopback test (offline)",
+ "Link test (on/offline)",
+};
+
+#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)
+
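For orientation, a hedged sketch of how an ethtool .self_test handler typically consumes the enum and string table above; the series' real handler is outside this excerpt, results land in data[] indexed by enum ice_ethtool_test_id in ice_gstrings_test[] order, and the register/EEPROM/link helpers are defined further down in this file:

static void
ice_self_test_sketch(struct net_device *netdev, struct ethtool_test *eth_test,
		     u64 *data)
{
	memset(data, 0, sizeof(u64) * ICE_TEST_LEN);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
	}
	/* the link test runs both online and offline */
	data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);

	if (data[ICE_ETH_TEST_REG] || data[ICE_ETH_TEST_EEPROM] ||
	    data[ICE_ETH_TEST_LINK])
		eth_test->flags |= ETH_TEST_FL_FAILED;
}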
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they aren't. This device is capable of supporting multiple
* VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
@@ -71,45 +89,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* is queried on the base PF netdev.
*/
static const struct ice_stats ice_gstrings_pf_stats[] = {
- ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
- ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
- ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
- ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
- ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
- ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
- ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
- ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
- ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors),
- ICE_PF_STAT("port.tx_size_64", stats.tx_size_64),
- ICE_PF_STAT("port.rx_size_64", stats.rx_size_64),
- ICE_PF_STAT("port.tx_size_127", stats.tx_size_127),
- ICE_PF_STAT("port.rx_size_127", stats.rx_size_127),
- ICE_PF_STAT("port.tx_size_255", stats.tx_size_255),
- ICE_PF_STAT("port.rx_size_255", stats.rx_size_255),
- ICE_PF_STAT("port.tx_size_511", stats.tx_size_511),
- ICE_PF_STAT("port.rx_size_511", stats.rx_size_511),
- ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
- ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
- ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
- ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
- ICE_PF_STAT("port.tx_size_big", stats.tx_size_big),
- ICE_PF_STAT("port.rx_size_big", stats.rx_size_big),
- ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
- ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
- ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
- ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
- ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
- ICE_PF_STAT("port.rx_undersize", stats.rx_undersize),
- ICE_PF_STAT("port.rx_fragments", stats.rx_fragments),
- ICE_PF_STAT("port.rx_oversize", stats.rx_oversize),
- ICE_PF_STAT("port.rx_jabber", stats.rx_jabber),
- ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
- ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors),
- ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
- ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors),
- ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
- ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
- ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+ ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
+ ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
+ ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
+ ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
+ ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
+ ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
+ ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
+ ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
+ ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
+ ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
+ ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
+ ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
+ ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
+ ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
+ ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
+ ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
+ ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
+ ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
+ ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
+ ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
+ ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
+ ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
+ ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
+ ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
+ ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
+ ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
+ ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
+ ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
+ ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
+ ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
+ ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
+ ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
+ ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
+ ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
+ ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
+ ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
+ ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
};
static const u32 ice_regs_dump_list[] = {
@@ -120,6 +138,9 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
+ PF0INT_ITR_0(0),
+ PF0INT_ITR_1(0),
+ PF0INT_ITR_2(0),
};
struct ice_priv_flag {
@@ -134,7 +155,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
- ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
+ ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -278,6 +299,571 @@ out:
return ret;
}
+/**
+ * ice_active_vfs - check if there are any active VFs
+ * @pf: board private structure
+ *
+ * Returns true if an active VF is found, otherwise returns false
+ */
+static bool ice_active_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return true;
+ return false;
+}
+
+/**
+ * ice_link_test - perform a link test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_link_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum ice_status status;
+ bool link_up = false;
+
+ netdev_info(netdev, "link test\n");
+ status = ice_get_link_status(np->vsi->port_info, &link_up);
+ if (status) {
+ netdev_err(netdev, "link query error, status = %d\n", status);
+ return 1;
+ }
+
+ if (!link_up)
+ return 2;
+
+ return 0;
+}
+
+/**
+ * ice_eeprom_test - perform an EEPROM test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_eeprom_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ netdev_info(netdev, "EEPROM test\n");
+ return !!(ice_nvm_validate_checksum(&pf->hw));
+}
+
+/**
+ * ice_reg_pattern_test
+ * @hw: pointer to the HW struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ */
+static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
+{
+ struct ice_pf *pf = (struct ice_pf *)hw->back;
+ static const u32 patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5,
+ 0x00000000, 0xFFFFFFFF
+ };
+ u32 val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
+ u32 pattern = patterns[i] & mask;
+
+ wr32(hw, reg, pattern);
+ val = rd32(hw, reg);
+ if (val == pattern)
+ continue;
+ dev_err(&pf->pdev->dev,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+ __func__, reg, pattern, val);
+ return 1;
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ dev_err(&pf->pdev->dev,
+ "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n",
+ __func__, reg, orig_val, val);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reg_test - perform a register test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_reg_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_hw *hw = np->vsi->port_info->hw;
+ u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
+ hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
+ struct ice_diag_reg_test_info {
+ u32 address;
+ u32 mask;
+ u32 elem_num;
+ u32 elem_size;
+ } ice_reg_list[] = {
+ {GLINT_ITR(0, 0), 0x00000fff, int_elements,
+ GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
+ {GLINT_ITR(1, 0), 0x00000fff, int_elements,
+ GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
+ {GLINT_ITR(2, 0), 0x00000fff, int_elements,
+ GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
+ {GLINT_CTL, 0xffff0001, 1, 0}
+ };
+ int i;
+
+ netdev_dbg(netdev, "Register test\n");
+ for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
+ u32 j;
+
+ for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
+ u32 mask = ice_reg_list[i].mask;
+ u32 reg = ice_reg_list[i].address +
+ (j * ice_reg_list[i].elem_size);
+
+ /* bail on failure (non-zero return) */
+ if (ice_reg_pattern_test(hw, reg, mask))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_lbtest_prepare_rings - configure Tx/Rx test rings
+ * @vsi: pointer to the VSI structure
+ *
+ * Function configures rings of a VSI for loopback test without
+ * enabling interrupts or informing the kernel about new queues.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
+{
+ int status;
+
+ status = ice_vsi_setup_tx_rings(vsi);
+ if (status)
+ goto err_setup_tx_ring;
+
+ status = ice_vsi_setup_rx_rings(vsi);
+ if (status)
+ goto err_setup_rx_ring;
+
+ status = ice_vsi_cfg(vsi);
+ if (status)
+ goto err_setup_rx_ring;
+
+ status = ice_vsi_start_rx_rings(vsi);
+ if (status)
+ goto err_start_rx_ring;
+
+ return status;
+
+err_start_rx_ring:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_rx_ring:
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
+err_setup_tx_ring:
+ ice_vsi_free_tx_rings(vsi);
+
+ return status;
+}
+
+/**
+ * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
+ * @vsi: pointer to the VSI structure
+ *
+ * Function stops and frees VSI rings after a loopback test.
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
+{
+ int status;
+
+ status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
+ if (status)
+ netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
+ vsi->vsi_num, status);
+
+ status = ice_vsi_stop_rx_rings(vsi);
+ if (status)
+ netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
+ vsi->vsi_num, status);
+
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+
+ return status;
+}
+
+/**
+ * ice_lbtest_create_frame - create test packet
+ * @pf: pointer to the PF structure
+ * @ret_data: allocated frame buffer
+ * @size: size of the packet data
+ *
+ * Function allocates a frame with a test pattern at specific offsets.
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
+{
+ u8 *data;
+
+ if (!pf)
+ return -EINVAL;
+
+ data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Since the Ethernet test frame should always be at least
+ * 64 bytes long, fill some octets in the payload with test data.
+ */
+ memset(data, 0xFF, size);
+ data[32] = 0xDE;
+ data[42] = 0xAD;
+ data[44] = 0xBE;
+ data[46] = 0xEF;
+
+ *ret_data = data;
+
+ return 0;
+}
+
+/**
+ * ice_lbtest_check_frame - verify received loopback frame
+ * @frame: pointer to the raw packet data
+ *
+ * Function verifies a received test frame against the expected pattern.
+ * Returns true if frame matches the pattern, false otherwise.
+ */
+static bool ice_lbtest_check_frame(u8 *frame)
+{
+ /* Validate bytes of a frame under offsets chosen earlier */
+ if (frame[32] == 0xDE &&
+ frame[42] == 0xAD &&
+ frame[44] == 0xBE &&
+ frame[46] == 0xEF &&
+ frame[48] == 0xFF)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_diag_send - send test frames to the test ring
+ * @tx_ring: pointer to the transmit ring
+ * @data: pointer to the raw packet data
+ * @size: size of the packet to send
+ *
+ * Function sends loopback packets on a test Tx ring.
+ */
+static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+{
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+ u64 td_cmd;
+
+ tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];
+
+ dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -EINVAL;
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+
+ /* These flags are required for a descriptor to be pushed out */
+ td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
+ tx_desc->cmd_type_offset_bsz =
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((u64)0 << ICE_TXD_QW1_L2TAG1_S));
+
+ tx_buf->next_to_watch = tx_desc;
+
+ /* Force memory write to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use >= tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ writel_relaxed(tx_ring->next_to_use, tx_ring->tail);
+
+ /* Wait for the looped-back packets to land in the receive queue. */
+ usleep_range(1000, 2000);
+ dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+#define ICE_LB_FRAME_SIZE 64
+/**
+ * ice_lbtest_receive_frames - receive and verify test frames
+ * @rx_ring: pointer to the receive ring
+ *
+ * Function receives loopback packets and verifies their correctness.
+ * Returns number of received valid frames.
+ */
+static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+{
+ struct ice_rx_buf *rx_buf;
+ int valid_frames, i;
+ u8 *received_buf;
+
+ valid_frames = 0;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ union ice_32b_rx_flex_desc *rx_desc;
+
+ rx_desc = ICE_RX_DESC(rx_ring, i);
+
+ if (!(rx_desc->wb.status_error0 &
+ cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ continue;
+
+ rx_buf = &rx_ring->rx_buf[i];
+ received_buf = page_address(rx_buf->page);
+
+ if (ice_lbtest_check_frame(received_buf))
+ valid_frames++;
+ }
+
+ return valid_frames;
+}
+
+/**
+ * ice_loopback_test - perform a loopback test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_loopback_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
+ struct ice_pf *pf = orig_vsi->back;
+ struct ice_ring *tx_ring, *rx_ring;
+ u8 broadcast[ETH_ALEN], ret = 0;
+ int num_frames, valid_frames;
+ LIST_HEAD(tmp_list);
+ u8 *tx_frame;
+ int i;
+
+ netdev_info(netdev, "loopback test\n");
+
+ test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
+ if (!test_vsi) {
+ netdev_err(netdev, "Failed to create a VSI for the loopback test");
+ return 1;
+ }
+
+ test_vsi->netdev = netdev;
+ tx_ring = test_vsi->tx_rings[0];
+ rx_ring = test_vsi->rx_rings[0];
+
+ if (ice_lbtest_prepare_rings(test_vsi)) {
+ ret = 2;
+ goto lbtest_vsi_close;
+ }
+
+ if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
+ ret = 3;
+ goto lbtest_rings_dis;
+ }
+
+ /* Enable MAC loopback in firmware */
+ if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
+ ret = 4;
+ goto lbtest_mac_dis;
+ }
+
+ /* Test VSI needs to receive broadcast packets */
+ eth_broadcast_addr(broadcast);
+ if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+ ret = 5;
+ goto lbtest_mac_dis;
+ }
+
+ if (ice_add_mac(&pf->hw, &tmp_list)) {
+ ret = 6;
+ goto free_mac_list;
+ }
+
+ if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
+ ret = 7;
+ goto remove_mac_filters;
+ }
+
+ num_frames = min_t(int, tx_ring->count, 32);
+ for (i = 0; i < num_frames; i++) {
+ if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
+ ret = 8;
+ goto lbtest_free_frame;
+ }
+ }
+
+ valid_frames = ice_lbtest_receive_frames(rx_ring);
+ if (!valid_frames)
+ ret = 9;
+ else if (valid_frames != num_frames)
+ ret = 10;
+
+lbtest_free_frame:
+ devm_kfree(&pf->pdev->dev, tx_frame);
+remove_mac_filters:
+ if (ice_remove_mac(&pf->hw, &tmp_list))
+ netdev_err(netdev, "Could not remove MAC filter for the test VSI");
+free_mac_list:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
+lbtest_mac_dis:
+ /* Disable MAC loopback after the test is completed. */
+ if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
+ netdev_err(netdev, "Could not disable MAC loopback\n");
+lbtest_rings_dis:
+ if (ice_lbtest_disable_rings(test_vsi))
+ netdev_err(netdev, "Could not disable test rings\n");
+lbtest_vsi_close:
+ test_vsi->netdev = NULL;
+ if (ice_vsi_release(test_vsi))
+ netdev_err(netdev, "Failed to remove the test VSI");
+
+ return ret;
+}
+
+/**
+ * ice_intr_test - perform an interrupt test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_intr_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ u16 swic_old = pf->sw_int_count;
+
+ netdev_info(netdev, "interrupt test\n");
+
+ wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx),
+ GLINT_DYN_CTL_SW_ITR_INDX_M |
+ GLINT_DYN_CTL_INTENA_MSK_M |
+ GLINT_DYN_CTL_SWINT_TRIG_M);
+
+ usleep_range(1000, 2000);
+ return (swic_old == pf->sw_int_count);
+}
+
+/**
+ * ice_self_test - handler function for performing a self-test by ethtool
+ * @netdev: network interface device structure
+ * @eth_test: ethtool_test structure
+ * @data: array in which each test's result is returned
+ *
+ * This function is called after invoking 'ethtool -t devname' command where
+ * devname is the name of the network device on which ethtool should operate.
+ * It performs a set of self-tests to check if a device works properly.
+ */
+static void
+ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ netdev_info(netdev, "offline testing starting\n");
+
+ set_bit(__ICE_TESTING, pf->state);
+
+ if (ice_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ data[ICE_ETH_TEST_REG] = 1;
+ data[ICE_ETH_TEST_EEPROM] = 1;
+ data[ICE_ETH_TEST_INTR] = 1;
+ data[ICE_ETH_TEST_LOOP] = 1;
+ data[ICE_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__ICE_TESTING, pf->state);
+ goto skip_ol_tests;
+ }
+ /* If the device is online then take it offline */
+ if (if_running)
+ /* indicate we're in test mode */
+ ice_stop(netdev);
+
+ data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
+ data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
+ data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
+ data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
+ data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);
+
+ if (data[ICE_ETH_TEST_LINK] ||
+ data[ICE_ETH_TEST_EEPROM] ||
+ data[ICE_ETH_TEST_LOOP] ||
+ data[ICE_ETH_TEST_INTR] ||
+ data[ICE_ETH_TEST_REG])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__ICE_TESTING, pf->state);
+
+ if (if_running) {
+ int status = ice_open(netdev);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Could not open device %s, err %d",
+ pf->int_name, status);
+ }
+ }
+ } else {
+ /* Online tests */
+ netdev_info(netdev, "online testing starting\n");
+
+ data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
+ if (data[ICE_ETH_TEST_LINK])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline-only tests are not run online; report them as passing */
+ data[ICE_ETH_TEST_REG] = 0;
+ data[ICE_ETH_TEST_EEPROM] = 0;
+ data[ICE_ETH_TEST_INTR] = 0;
+ data[ICE_ETH_TEST_LOOP] = 0;
+ }
+
+skip_ol_tests:
+ netdev_info(netdev, "testing finished\n");
+}
+
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -295,17 +881,17 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ice_for_each_alloc_txq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
- "tx-queue-%u.tx_packets", i);
+ "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
ice_for_each_alloc_rxq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
- "rx-queue-%u.rx_packets", i);
+ "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
@@ -320,21 +906,24 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.tx-priority-%u-xon", i);
+ "tx_priority_%u_xon.nic", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.tx-priority-%u-xoff", i);
+ "tx_priority_%u_xoff.nic", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx-priority-%u-xon", i);
+ "rx_priority_%u_xon.nic", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.rx-priority-%u-xoff", i);
+ "rx_priority_%u_xoff.nic", i);
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_TEST:
+ memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
@@ -371,6 +960,185 @@ ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
}
/**
+ * ice_set_fec_cfg - Set link FEC options
+ * @netdev: network interface device structure
+ * @req_fec: FEC mode to configure
+ */
+static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_set_phy_cfg_data config = { 0 };
+ struct ice_aqc_get_phy_caps_data *caps;
+ struct ice_vsi *vsi = np->vsi;
+ u8 sw_cfg_caps, sw_cfg_fec;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+ if (!pi)
+ return -EOPNOTSUPP;
+
+ /* Changing the FEC parameters is not supported if not the PF VSI */
+ if (vsi->type != ICE_VSI_PF) {
+ netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Get last SW configuration */
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy SW configuration returned from PHY caps to PHY config */
+ ice_copy_phy_caps_to_cfg(caps, &config);
+ sw_cfg_caps = caps->caps;
+ sw_cfg_fec = caps->link_fec_options;
+
+ /* Get topology caps, then copy PHY FEC topology caps to PHY config */
+ memset(caps, 0, sizeof(*caps));
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
+ config.link_fec_opt = caps->link_fec_options;
+
+ ice_cfg_phy_fec(&config, req_fec);
+
+ /* If FEC mode has changed, then set PHY configuration and enable AN. */
+ if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
+ (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
+ config.link_fec_opt != sw_cfg_fec) {
+ if (caps->caps & ICE_AQC_PHY_AN_MODE)
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
+
+ if (status)
+ err = -EAGAIN;
+ }
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
+ return err;
+}
+
+/**
+ * ice_set_fecparam - Set FEC link options
+ * @netdev: network interface device structure
+ * @fecparam: Ethtool structure to retrieve FEC parameters
+ */
+static int
+ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_fec_mode fec;
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec = ICE_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = ICE_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = ICE_FEC_BASER;
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec = ICE_FEC_NONE;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ return ice_set_fec_cfg(netdev, fec);
+}
+
+/**
+ * ice_get_fecparam - Get link FEC options
+ * @netdev: network interface device structure
+ * @fecparam: Ethtool structure to retrieve FEC parameters
+ */
+static int
+ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *caps;
+ struct ice_link_status *link_info;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+
+ if (!pi)
+ return -EOPNOTSUPP;
+ link_info = &pi->phy.link_info;
+
+ /* Set FEC mode based on negotiated link info */
+ switch (link_info->fec_info) {
+ case ICE_AQ_LINK_25G_KR_FEC_EN:
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+ /* fall through */
+ case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ }
+
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Set supported/configured FEC modes based on PHY capability */
+ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ if (caps->link_fec_options == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
+ return err;
+}
+
+/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
@@ -433,10 +1201,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
- if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
- if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+ if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
enum ice_status status;
+ /* Disable FW LLDP engine */
status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
NULL);
/* If unregistering for LLDP events fails, this is
@@ -450,7 +1219,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
*/
- status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+ status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to stop LLDP agent\n");
@@ -458,9 +1227,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* will likely not need DCB, so failure to init is
* not a concern of ethtool
*/
- status = ice_init_pf_dcb(pf);
+ status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
+
+ /* Forward LLDP packets to default VSI so that they
+ * are passed up the stack
+ */
+ ice_cfg_sw_lldp(vsi, false, true);
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -468,12 +1242,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
*/
- status = ice_aq_start_lldp(&pf->hw, NULL);
+ status = ice_aq_start_lldp(&pf->hw, true, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to start LLDP Agent\n");
- /* AQ command to start FW DCBx agent will fail if
+ /* AQ command to start FW DCBX agent will fail if
* the agent is already started
*/
status = ice_aq_start_stop_dcbx(&pf->hw, true,
@@ -491,15 +1265,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* registration/init failed but do not return error
* state to ethtool
*/
- status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
- NULL);
- if (status)
- dev_dbg(&pf->pdev->dev,
- "Fail to reg for MIB change\n");
-
- status = ice_init_pf_dcb(pf);
+ status = ice_init_pf_dcb(pf, true);
if (status)
dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+
+ /* Remove rule to direct LLDP packets to default VSI.
+ * The FW LLDP engine will now be consuming them.
+ */
+ ice_cfg_sw_lldp(vsi, false, false);
}
}
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
@@ -529,6 +1302,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* not safe.
*/
return ICE_ALL_STATS_LEN(netdev);
+ case ETH_SS_TEST:
+ return ICE_TEST_LEN;
case ETH_SS_PRIV_FLAGS:
return ICE_PRIV_FLAG_ARRAY_SIZE;
default:
@@ -628,7 +1403,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
100baseT_Full);
}
@@ -636,14 +1412,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseKX_Full);
}
@@ -651,14 +1429,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseX_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseT_Full);
}
@@ -666,7 +1446,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseX_Full);
}
@@ -674,7 +1455,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
5000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
5000baseT_Full);
}
@@ -684,28 +1466,32 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseKR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseSR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseLR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseLR_Full);
}
@@ -717,7 +1503,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
}
@@ -725,7 +1512,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
}
@@ -734,14 +1522,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseKR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full);
}
@@ -750,21 +1540,24 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseCR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseSR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseLR4_Full);
}
@@ -779,7 +1572,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseCR2_Full);
}
@@ -787,7 +1581,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseKR2_Full);
}
@@ -797,7 +1592,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseSR2_Full);
}
@@ -814,7 +1610,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -826,7 +1623,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -838,7 +1636,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -851,7 +1650,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode)
@@ -1275,6 +2075,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
@@ -1345,6 +2146,40 @@ ice_get_link_ksettings(struct net_device *netdev,
break;
}
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ goto done;
+
+ if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL))
+ netdev_info(netdev, "Get phy capability failed.\n");
+
+ /* Set supported FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
+ if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
+ caps, NULL))
+ netdev_info(netdev, "Get phy capability failed.\n");
+
+ /* Set advertised FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
return 0;
}
@@ -2371,8 +3206,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
- rc->ring->q_vector->v_idx),
+ wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx),
ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
pf->hw.intrl_gran));
}
@@ -2533,6 +3367,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
+ .self_test = ice_self_test,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
@@ -2557,6 +3392,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
+ .get_fecparam = ice_get_fecparam,
+ .set_fecparam = ice_set_fecparam,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index ec25f26069b0..6c5ce05742b1 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,6 +6,9 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096))
+#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096))
+#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096))
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -155,6 +158,7 @@
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
+#define PFINT_OICR_SWINT_M BIT(31)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_OICR_CTL_ITR_INDX_S 11
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fbf1eba0cc2a..a19f5920733b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -137,6 +137,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
* for PF or EMP this field should be set to zero
*/
switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -251,6 +253,10 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rxrings;
+ /* There is no need to allocate q_vectors for a loopback VSI. */
+ if (vsi->type == ICE_VSI_LB)
+ return 0;
+
/* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
sizeof(*vsi->q_vectors), GFP_KERNEL);
@@ -275,6 +281,8 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ /* fall through */
+ case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
@@ -313,10 +321,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
- * of queues vectors, subtract 1 from the original vector
- * count
+ * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
+ * original vector count
*/
- vsi->num_q_vectors = pf->num_vf_msix - 1;
+ vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
+ break;
+ case ICE_VSI_LB:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
@@ -516,6 +528,10 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_LB:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
@@ -732,6 +748,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
+ case ICE_VSI_LB:
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -924,6 +942,9 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
+ case ICE_VSI_LB:
+ dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
+ return;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
return;
@@ -955,6 +976,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -1145,61 +1168,32 @@ err_out:
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int num_q_vectors = 0;
+ u16 num_q_vectors;
+
+ /* SRIOV doesn't grab irq_tracker entries for each VSI */
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
- if (vsi->sw_base_vector || vsi->hw_base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
- vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
+ if (vsi->base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ vsi->vsi_num, vsi->base_vector);
return -EEXIST;
}
if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
return -ENOENT;
- switch (vsi->type) {
- case ICE_VSI_PF:
- num_q_vectors = vsi->num_q_vectors;
- /* reserve slots from OS requested IRQs */
- vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
- num_q_vectors, vsi->idx);
- if (vsi->sw_base_vector < 0) {
- dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num,
- vsi->sw_base_vector);
- return -ENOENT;
- }
- pf->num_avail_sw_msix -= num_q_vectors;
-
- /* reserve slots from HW interrupts */
- vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
- num_q_vectors, vsi->idx);
- break;
- case ICE_VSI_VF:
- /* take VF misc vector and data vectors into account */
- num_q_vectors = pf->num_vf_msix;
- /* For VF VSI, reserve slots only from HW interrupts */
- vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
- num_q_vectors, vsi->idx);
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- break;
- }
-
- if (vsi->hw_base_vector < 0) {
+ num_q_vectors = vsi->num_q_vectors;
+ /* reserve slots from OS requested IRQs */
+ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ vsi->idx);
+ if (vsi->base_vector < 0) {
dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
- if (vsi->type != ICE_VSI_VF) {
- ice_free_res(pf->sw_irq_tracker,
- vsi->sw_base_vector, vsi->idx);
- pf->num_avail_sw_msix += num_q_vectors;
- }
+ "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
-
- pf->num_avail_hw_msix -= num_q_vectors;
+ pf->num_avail_sw_msix -= num_q_vectors;
return 0;
}
@@ -1842,8 +1836,73 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
}
/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+#else
+static void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+#endif /* CONFIG_PCI_IOV */
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+#else
+static void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+#endif /* CONFIG_PCI_IOV */
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
+ *
+ * This configures MSIX mode interrupts for the PF VSI, and should not be used
+ * for the VF VSI.
*/
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
@@ -1873,43 +1932,17 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
- int itr_idx = (q_vector->tx.itr_idx <<
- QINT_TQCTL_ITR_INDX_S) &
- QINT_TQCTL_ITR_INDX_M;
- u32 val;
-
- if (vsi->type == ICE_VSI_VF)
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- (((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
- QINT_TQCTL_MSIX_INDX_M);
- else
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
- QINT_TQCTL_MSIX_INDX_M);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ ice_cfg_txq_interrupt(vsi, txq, reg_idx,
+ q_vector->tx.itr_idx);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
- int itr_idx = (q_vector->rx.itr_idx <<
- QINT_RQCTL_ITR_INDX_S) &
- QINT_RQCTL_ITR_INDX_M;
- u32 val;
-
- if (vsi->type == ICE_VSI_VF)
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- (((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
- QINT_RQCTL_MSIX_INDX_M);
- else
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
- QINT_RQCTL_MSIX_INDX_M);
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
+ q_vector->rx.itr_idx);
rxq++;
}
}
-
- ice_flush(hw);
}
/**
@@ -2024,6 +2057,19 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2070,8 +2116,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- if (!rings || !rings[q_idx] ||
- !rings[q_idx]->q_vector) {
+ struct ice_q_vector *q_vector;
+
+ if (!rings || !rings[q_idx]) {
err = -EINVAL;
goto err_out;
}
@@ -2091,9 +2138,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
/* trigger a software interrupt for the vector
* associated to the queue to schedule NAPI handler
*/
- wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_MSK_M);
+ q_vector = rings[i]->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
q_idx++;
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
@@ -2234,7 +2282,14 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
goto clear_reg_idx;
}
- q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector;
+ if (vsi->type == ICE_VSI_VF) {
+ struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+
+ q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
+ } else {
+ q_vector->reg_idx =
+ q_vector->v_idx + vsi->base_vector;
+ }
}
return 0;
@@ -2291,6 +2346,54 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
}
/**
+ * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * @vsi: the VSI being configured
+ * @tx: bool to determine Tx or Rx rule
+ * @create: bool to determine create or remove rule
+ */
+void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
+
+ if (tx) {
+ list->fltr_info.fltr_act = ICE_DROP_PACKET;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+ } else {
+ list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ list->fltr_info.flag = ICE_FLTR_RX;
+ list->fltr_info.src_id = ICE_SRC_ID_LPORT;
+ }
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (create)
+ status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ else
+ status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+
+ if (status)
+ dev_err(&pf->pdev->dev,
+ "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "adding" : "removing", tx ? "TX" : "RX",
+ vsi->vsi_num, status);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
+
+/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2310,6 +2413,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
+ enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2389,23 +2493,24 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_alloc_q_vector;
- /* Setup Vector base only during VF init phase or when VF asks
- * for more vectors than assigned number. In all other cases,
- * assign hw_base_vector to the value given earlier.
- */
- if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto unroll_vector_base;
- } else {
- vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
- }
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto unroll_vector_base;
pf->q_left_tx -= vsi->alloc_txq;
pf->q_left_rx -= vsi->alloc_rxq;
+
+ /* Do not exit if configuring RSS had an issue; traffic can still
+ * be received on the first queue. Hence there is no need to
+ * capture the return value.
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
+ break;
+ case ICE_VSI_LB:
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_vsi_init;
break;
default:
/* clean up the resources and exit */
@@ -2416,12 +2521,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
+ vsi->vsi_num, status);
goto unroll_vector_base;
}
@@ -2430,19 +2535,28 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
+ * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
+ * need to be dropped so that VFs cannot send LLDP packets to reconfig
+ * DCB settings in the HW. Also, if the FW DCBX engine is not running
+ * then Rx LLDP packets need to be redirected up the stack.
*/
- if (vsi->type == ICE_VSI_PF)
+ if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
+ /* Tx LLDP packets */
+ ice_cfg_sw_lldp(vsi, true, true);
+
+ /* Rx LLDP packets */
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, true);
+ }
+
return vsi;
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- /* reclaim HW interrupt back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
@@ -2463,17 +2577,17 @@ unroll_get_qs:
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0;
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ u16 reg_idx = q_vector->reg_idx;
- wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
- wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++;
@@ -2495,7 +2609,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
@@ -2591,11 +2705,11 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
int count = 0;
int i;
- if (!res || index >= res->num_entries)
+ if (!res || index >= res->end)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ for (i = index; i < res->end && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
@@ -2613,10 +2727,9 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
- int start = res->search_hint;
- int end = start;
+ int start = 0, end = 0;
- if ((start + needed) > res->num_entries)
+ if (needed > res->end)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
@@ -2625,7 +2738,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
- if ((start + needed) > res->num_entries)
+ if ((start + needed) > res->end)
break;
}
@@ -2636,13 +2749,9 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
while (i != end)
res->list[i++] = id;
- if (end == res->num_entries)
- end = 0;
-
- res->search_hint = end;
return start;
}
- } while (1);
+ } while (end < res->end);
return -ENOMEM;
}
@@ -2654,16 +2763,11 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
* @needed: size of the block needed
* @id: identifier to track owner
*
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
+ * Returns the base item index of the block, or negative for error
*/
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
- int ret;
-
if (!res || !pf)
return -EINVAL;
@@ -2674,16 +2778,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
}
- /* search based on search_hint */
- ret = ice_search_res(res, needed, id);
-
- if (ret < 0) {
- /* previous search failed. Reset search hint and try again */
- res->search_hint = 0;
- ret = ice_search_res(res, needed, id);
- }
-
- return ret;
+ return ice_search_res(res, needed, id);
}
/**
@@ -2692,7 +2787,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
*/
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
@@ -2738,6 +2833,21 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
+ */
+void ice_napi_del(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_del(&vsi->q_vectors[v_idx]->napi);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -2745,60 +2855,61 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- struct ice_vf *vf = NULL;
struct ice_pf *pf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
- if (vsi->type == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
- /* do not unregister and free netdevs while driver is in the reset
- * recovery pending state. Since reset/rebuild happens through PF
- * service task workqueue, its not a good idea to unregister netdev
- * that is associated to the PF that is running the work queue items
- * currently. This is done to avoid check_flush_dependency() warning
- * on this wq
+ /* do not unregister while driver is in the reset recovery pending
+ * state. Since reset/rebuild happens through PF service task workqueue,
+ * it's not a good idea to unregister netdev that is associated to the
+ * PF that is running the work queue items currently. This is done to
+ * avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
- ice_napi_del(vsi);
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
/* Disable VSI and free resources */
- ice_vsi_dis_irq(vsi);
+ if (vsi->type != ICE_VSI_LB)
+ ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
- /* reclaim interrupt vectors back to PF */
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- /* reclaim HW interrupts back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
- } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
- /* Reclaim VF resources back only while freeing all VFs or
- * vector reassignment is requested
- */
- ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
- vsi->idx);
- pf->num_avail_hw_msix += pf->num_vf_msix;
}
- if (vsi->type == ICE_VSI_PF)
+ if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_cfg_sw_lldp(vsi, true, false);
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+ }
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
+
+ /* make sure unregister_netdev() was called by checking __ICE_DOWN */
+ if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
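
The netdev free is now decoupled from unregister: unregister happens early (and only outside reset), while the free is deferred until teardown is complete. A condensed sketch of the resulting order, assembled from the hunks above (not a verbatim copy of ice_vsi_release(); the __ICE_DOWN comment reflects my reading of the close path):

static void vsi_release_order_sketch(struct ice_vsi *vsi, struct ice_pf *pf)
{
	/* userspace loses the netdev first, but its memory stays around */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
		unregister_netdev(vsi->netdev);

	ice_vsi_close(vsi);		/* close path marks __ICE_DOWN (assumed) */
	/* ...reclaim IRQs, remove filters, delete VSI, free q_vectors... */

	/* only free once the VSI is verifiably down */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
}
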
@@ -2825,6 +2936,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL;
+ enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -2838,24 +2950,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->sw_base_vector = 0;
- /* reclaim HW interrupts back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
- vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
- } else {
- /* Reclaim VF resources back to the common pool for reset and
- * and rebuild, with vector reassignment
- */
- ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
- vsi->idx);
- pf->num_avail_hw_msix += pf->num_vf_msix;
+ vsi->base_vector = 0;
}
- vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -2881,10 +2986,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -2929,12 +3030,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
+ vsi->vsi_num, status);
goto err_vectors;
}
return 0;
@@ -2956,7 +3057,7 @@ err_vsi:
/**
* ice_is_reset_in_progress - check for a reset in progress
- * @state: pf state field
+ * @state: PF state field
*/
bool ice_is_reset_in_progress(unsigned long *state)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index a91d3553cc89..6e43ef03bfc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -19,6 +19,14 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+#endif /* CONFIG_PCI_IOV */
+
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -37,6 +45,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
+void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
@@ -49,6 +59,8 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
+void ice_napi_del(struct ice_vsi *vsi);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
@@ -64,6 +76,8 @@ bool ice_is_reset_in_progress(unsigned long *state);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+
void ice_vsi_put_qs(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 7843abf4d44d..28ec0d57941d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -61,9 +61,10 @@ static u32 ice_get_tx_pending(struct ice_ring *ring)
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
struct ice_vsi *vsi = NULL;
+ struct ice_hw *hw;
unsigned int i;
- u32 v, v_idx;
int packets;
+ u32 v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
@@ -77,12 +78,12 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
return;
+ hw = &vsi->back->hw;
+
for (i = 0; i < vsi->num_txq; i++) {
struct ice_ring *tx_ring = vsi->tx_rings[i];
if (tx_ring && tx_ring->desc) {
- int itr = ICE_ITR_NONE;
-
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -93,12 +94,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
packets = tx_ring->stats.pkts & INT_MAX;
if (tx_ring->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
- v_idx = tx_ring->q_vector->v_idx;
- wr32(&vsi->back->hw,
- GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
- (itr << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_MSK_M);
+ ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
}
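
The open-coded register write gives way to ice_trigger_sw_intr(). A plausible body for that helper, reconstructed from the write it replaces and the q_vector->reg_idx field introduced earlier in this series (a sketch; the real helper lives in ice_lib.c and may differ):

static void trigger_sw_intr_sketch(struct ice_hw *hw,
				   struct ice_q_vector *q_vector)
{
	/* fire a software interrupt without touching the ITR selection */
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_MSK_M);
}
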
@@ -113,6 +109,67 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
+ * ice_init_mac_fltr - Set initial MAC filters
+ * @pf: board private structure
+ *
+ * Set initial set of MAC filters for PF VSI; configure filters for permanent
+ * address and broadcast address. If an error is encountered, netdevice will be
+ * unregistered.
+ */
+static int ice_init_mac_fltr(struct ice_pf *pf)
+{
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status;
+
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi)
+ return -EINVAL;
+
+ /* To add a MAC filter, first add the MAC to a list and then
+ * pass the list to ice_add_mac.
+ */
+
+ /* Add a unicast MAC filter so the VSI can get its packets */
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vsi->port_info->mac.perm_addr);
+ if (status)
+ goto unregister;
+
+ /* VSI needs to receive broadcast traffic, so add the broadcast
+ * MAC address to the list as well.
+ */
+ eth_broadcast_addr(broadcast);
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto free_mac_list;
+
+ /* Program MAC filters for entries in tmp_add_list */
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ status = -ENOMEM;
+
+free_mac_list:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+
+unregister:
+ /* The device isn't usable without MAC filters, so unregister the
+ * netdev if we hit an error
+ */
+ if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(&pf->pdev->dev,
+ "Could not add MAC filters error %d. Unregistering device\n",
+ status);
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ return status;
+}
+
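
ice_init_mac_fltr() follows the switch API's stage-then-commit idiom: collect addresses on a temporary list, then program them with a single ice_add_mac() call, so a failure while building the list programs nothing. A userspace model of that shape (names and sizes here are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

#define MAX_STAGED 8

struct mac_list {
	unsigned char addr[MAX_STAGED][6];
	int n;
};

static int stage_mac(struct mac_list *l, const unsigned char *mac)
{
	if (l->n == MAX_STAGED)
		return -1;			/* nothing programmed yet */
	memcpy(l->addr[l->n++], mac, 6);
	return 0;
}

static void commit_macs(const struct mac_list *l)
{
	/* the driver hands the whole list to ice_add_mac() here */
	for (int i = 0; i < l->n; i++)
		printf("programming filter %d (%02x:%02x:...)\n",
		       i, l->addr[i][0], l->addr[i][1]);
}

int main(void)
{
	const unsigned char perm[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mac_list list = { .n = 0 };

	if (!stage_mac(&list, perm) && !stage_mac(&list, bcast))
		commit_macs(&list);	/* single batch, like ice_add_mac */
	return 0;
}
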
+/**
* ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
* @addr: MAC address to sync
@@ -567,7 +624,11 @@ static void ice_reset_subtask(struct ice_pf *pf)
*/
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
+ struct ice_aqc_get_phy_caps_data *caps;
+ enum ice_status status;
+ const char *fec_req;
const char *speed;
+ const char *fec;
const char *fc;
if (!vsi)
@@ -584,6 +645,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = "100 G";
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = "50 G";
+ break;
case ICE_AQ_LINK_SPEED_40GB:
speed = "40 G";
break;
@@ -615,13 +682,13 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
switch (vsi->port_info->fc.current_mode) {
case ICE_FC_FULL:
- fc = "RX/TX";
+ fc = "Rx/Tx";
break;
case ICE_FC_TX_PAUSE:
- fc = "TX";
+ fc = "Tx";
break;
case ICE_FC_RX_PAUSE:
- fc = "RX";
+ fc = "Rx";
break;
case ICE_FC_NONE:
fc = "None";
@@ -631,8 +698,47 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
break;
}
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
- speed, fc);
+ /* Get FEC mode based on negotiated link info */
+ switch (vsi->port_info->phy.link_info.fec_info) {
+ case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+ /* fall through */
+ case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+ fec = "RS-FEC";
+ break;
+ case ICE_AQ_LINK_25G_KR_FEC_EN:
+ fec = "FC-FEC/BASE-R";
+ break;
+ default:
+ fec = "NONE";
+ break;
+ }
+
+ /* Get FEC mode requested based on PHY caps last SW configuration */
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ fec_req = "Unknown";
+ goto done;
+ }
+
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ if (status)
+ netdev_info(vsi->netdev, "Get PHY capability failed.\n");
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
+ fec_req = "RS-FEC";
+ else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ fec_req = "FC-FEC/BASE-R";
+ else
+ fec_req = "NONE";
+
+ devm_kfree(&vsi->back->pdev->dev, caps);
+
+done:
+ netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n",
+ speed, fec_req, fec, fc);
}
/**
@@ -664,7 +770,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
/**
* ice_link_event - process the link event
- * @pf: pf that the link event is associated with
+ * @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
* @link_up: true if the physical link is up and false if it is down
* @link_speed: current link speed received from the link event
@@ -774,7 +880,7 @@ static int ice_init_link_events(struct ice_port_info *pi)
/**
* ice_handle_link_event - handle link event via ARQ
- * @pf: pf that the link event is associated with
+ * @pf: PF that the link event is associated with
* @event: event structure containing link status info
*/
static int
@@ -1161,16 +1267,16 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
}
- /* see if one of the VFs needs to be reset */
- for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ /* check to see if one of the VFs caused the MDD */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vf *vf = &pf->vf[i];
- mdd_detected = false;
+ bool vf_mdd_detected = false;
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1178,7 +1284,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1186,7 +1292,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1194,19 +1300,18 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
- if (mdd_detected) {
+ if (vf_mdd_detected) {
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ if (vf->num_mdd_events > 1)
+ dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
+ i, vf->num_mdd_events);
}
}
-
}
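
Condensed, the per-VF loop above is four instances of one register idiom. A sketch of that idiom (assuming write-to-clear semantics for the VALID bits, which is what the 0xFFFF writes suggest):

static bool vf_mdd_pending_sketch(struct ice_hw *hw, int vf_id)
{
	u32 reg = rd32(hw, VP_MDET_TX_PQM(vf_id));

	if (reg & VP_MDET_TX_PQM_VALID_M) {
		/* ack/clear the latched event before reporting it */
		wr32(hw, VP_MDET_TX_PQM(vf_id), 0xFFFF);
		return true;
	}
	/* ...TX_TCLAN, TX_TDPU and RX follow the same pattern... */
	return false;
}
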
/**
@@ -1327,7 +1432,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
@@ -1408,7 +1513,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1430,6 +1535,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_SWINT_M) {
+ ena_mask &= ~PFINT_OICR_SWINT_M;
+ pf->sw_int_count++;
+ }
+
if (oicr & PFINT_OICR_MAL_DETECT_M) {
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
@@ -1556,15 +1666,13 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_flush(hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
+ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
- pf->msix_entries[pf->sw_oicr_idx].vector, pf);
+ pf->msix_entries[pf->oicr_idx].vector, pf);
}
pf->num_avail_sw_msix += 1;
- ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
- pf->num_avail_hw_msix += 1;
- ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
@@ -1618,43 +1726,31 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
if (ice_is_reset_in_progress(pf->state))
goto skip_req_irq;
- /* reserve one vector in sw_irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ /* reserve one vector in irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->sw_oicr_idx = oicr_idx;
-
- /* reserve one vector in hw_irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- if (oicr_idx < 0) {
- ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- pf->num_avail_sw_msix += 1;
- return oicr_idx;
- }
- pf->num_avail_hw_msix -= 1;
- pf->hw_oicr_idx = oicr_idx;
+ pf->oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->sw_oicr_idx].vector,
+ pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
- ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- pf->num_avail_hw_msix += 1;
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx);
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
+ ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw);
@@ -1664,21 +1760,6 @@ skip_req_irq:
}
/**
- * ice_napi_del - Remove NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be removed
- */
-void ice_napi_del(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_del(&vsi->q_vectors[v_idx]->napi);
-}
-
-/**
* ice_napi_add - register NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be registered
*
@@ -1803,8 +1884,8 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
* @pf: board private structure
* @pi: pointer to the port_info instance
*
- * Returns pointer to the successfully allocated VSI sw struct on success,
- * otherwise returns NULL on failure.
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
*/
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
@@ -1813,6 +1894,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_lb_vsi_setup - Set up a loopback VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
+ */
+struct ice_vsi *
+ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+}
+
+/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
@@ -1900,8 +1995,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
*/
static int ice_setup_pf_sw(struct ice_pf *pf)
{
- LIST_HEAD(tmp_add_list);
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
int status = 0;
@@ -1926,38 +2019,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vsi->port_info->mac.perm_addr);
+ status = ice_init_mac_fltr(pf);
if (status)
goto unroll_napi_add;
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto free_mac_list;
-
- /* program MAC filters for entries in tmp_add_list */
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status) {
- dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
- status = -ENOMEM;
- goto free_mac_list;
- }
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
-free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
@@ -2149,14 +2216,9 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
- if (pf->sw_irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
- pf->sw_irq_tracker = NULL;
- }
-
- if (pf->hw_irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
- pf->hw_irq_tracker = NULL;
+ if (pf->irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
}
}
@@ -2166,7 +2228,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
- int vectors = 0, hw_vectors = 0;
+ int vectors;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf);
@@ -2177,31 +2239,18 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors;
/* set up vector assignment tracking */
- pf->sw_irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) +
+ pf->irq_tracker =
+ devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
(sizeof(u16) * vectors), GFP_KERNEL);
- if (!pf->sw_irq_tracker) {
+ if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
/* populate SW interrupts pool with number of OS granted IRQs. */
pf->num_avail_sw_msix = vectors;
- pf->sw_irq_tracker->num_entries = vectors;
-
- /* set up HW vector assignment tracking */
- hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- pf->hw_irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) +
- (sizeof(u16) * hw_vectors), GFP_KERNEL);
- if (!pf->hw_irq_tracker) {
- ice_clear_interrupt_scheme(pf);
- return -ENOMEM;
- }
-
- /* populate HW interrupts pool with number of HW supported irqs. */
- pf->num_avail_hw_msix = hw_vectors;
- pf->hw_irq_tracker->num_entries = hw_vectors;
+ pf->irq_tracker->num_entries = vectors;
+ pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
}
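
The kzalloc size (struct header plus vectors u16 entries) implies the tracker ends in a flexible array. An inferred shape for the merged tracker (the real definition is in ice.h, not shown in this diff; fields deduced from the usage above):

struct ice_res_tracker_sketch {
	u16 num_entries;	/* all OS-granted MSIX vectors */
	u16 end;		/* search bound for ice_get_res(); equals
				 * num_entries unless SR-IOV lowers it */
	u16 list[];		/* per-vector owner id | ICE_RES_VALID_BIT */
};
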
@@ -2252,7 +2301,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
- /* set up for high or low dma */
+ /* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -2302,7 +2351,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_pf(pf);
- err = ice_init_pf_dcb(pf);
+ err = ice_init_pf_dcb(pf, false);
if (err) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
@@ -2368,7 +2417,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2625,7 +2674,7 @@ static int __init ice_module_init(void)
status = pci_register_driver(&ice_driver);
if (status) {
- pr_err("failed to register pci driver, err %d\n", status);
+ pr_err("failed to register PCI driver, err %d\n", status);
destroy_workqueue(ice_wq);
}
@@ -2725,21 +2774,21 @@ free_lists:
ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
if (err) {
- netdev_err(netdev, "can't set mac %pM. filter update failed\n",
+ netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
return err;
}
/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
+ netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
netdev->dev_addr);
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
mac);
}
return 0;
@@ -2876,6 +2925,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
+
return ret;
}
@@ -2901,7 +2957,7 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
*
* Return 0 on success and negative value on error
*/
-static int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
@@ -2933,7 +2989,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, q_idx) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3456,7 +3512,7 @@ int ice_down(struct ice_vsi *vsi)
*
* Return 0 on success, negative on failure
*/
-static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
+int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
@@ -3482,7 +3538,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
*
* Return 0 on success, negative on failure
*/
-static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
@@ -3658,7 +3714,7 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_vsi_rebuild_all - rebuild all VSIs in pf
+ * ice_vsi_rebuild_all - rebuild all VSIs in PF
* @pf: the PF
*/
static int ice_vsi_rebuild_all(struct ice_pf *pf)
@@ -3728,7 +3784,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
/**
* ice_rebuild - rebuild after reset
- * @pf: pf to rebuild
+ * @pf: PF to rebuild
*/
static void ice_rebuild(struct ice_pf *pf)
{
@@ -3740,7 +3796,7 @@ static void ice_rebuild(struct ice_pf *pf)
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
- dev_dbg(dev, "rebuilding pf\n");
+ dev_dbg(dev, "rebuilding PF\n");
ret = ice_init_all_ctrlq(hw);
if (ret) {
@@ -3768,12 +3824,6 @@ static void ice_rebuild(struct ice_pf *pf)
ice_dcb_rebuild(pf);
- /* reset search_hint of irq_trackers to 0 since interrupts are
- * reclaimed and could be allocated from beginning during VSI rebuild
- */
- pf->sw_irq_tracker->search_hint = 0;
- pf->hw_irq_tracker->search_hint = 0;
-
err = ice_vsi_rebuild_all(pf);
if (err) {
dev_err(dev, "ice_vsi_rebuild_all failed\n");
@@ -3857,16 +3907,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
u8 count = 0;
if (new_mtu == netdev->mtu) {
- netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
+ netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
if (new_mtu < netdev->min_mtu) {
- netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
+ netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
} else if (new_mtu > netdev->max_mtu) {
- netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
+ netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
}
@@ -3882,7 +3932,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
} while (count < 100);
if (count == 100) {
- netdev_err(netdev, "can't change mtu. Device is busy\n");
+ netdev_err(netdev, "can't change MTU. Device is busy\n");
return -EBUSY;
}
@@ -3894,18 +3944,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
- netdev_err(netdev, "change mtu if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_up err %d\n", err);
return err;
}
err = ice_up(vsi);
if (err) {
- netdev_err(netdev, "change mtu if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_up err %d\n", err);
return err;
}
}
- netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
+ netdev_info(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
@@ -4241,7 +4291,7 @@ static void ice_tx_timeout(struct net_device *netdev)
*
* Returns 0 on success, negative value on failure
*/
-static int ice_open(struct net_device *netdev)
+int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -4278,7 +4328,7 @@ static int ice_open(struct net_device *netdev)
*
* Returns success only - not allowed to fail
*/
-static int ice_stop(struct net_device *netdev)
+int ice_stop(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 62571d33d0d6..bcb431f1bd92 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -119,7 +119,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
status = ice_read_sr_aq(hw, offset, 1, data, true);
if (!status)
- *data = le16_to_cpu(*(__le16 *)data);
+ *data = le16_to_cpu(*(__force __le16 *)data);
return status;
}
@@ -174,7 +174,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
} while (words_read < *words);
for (i = 0; i < *words; i++)
- data[i] = le16_to_cpu(((__le16 *)data)[i]);
+ data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
read_nvm_buf_aq_exit:
*words = words_read;
@@ -316,3 +316,34 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
return status;
}
+
+/**
+ * ice_nvm_validate_checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity (0x0706)
+ */
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+{
+ struct ice_aqc_nvm_checksum *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
+ cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ ice_release_nvm(hw);
+
+ if (!status)
+ if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
+ status = ICE_ERR_NVM_CHECKSUM;
+
+ return status;
+}
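
Nothing in this hunk calls the new function yet. One plausible consumer, purely as a sketch (the call site and messages are hypothetical; only ice_nvm_validate_checksum() and ICE_ERR_NVM_CHECKSUM come from this patch):

static void ice_nvm_sanity_check_sketch(struct ice_hw *hw, struct device *dev)
{
	enum ice_status status = ice_nvm_validate_checksum(hw);

	if (status == ICE_ERR_NVM_CHECKSUM)
		dev_warn(dev, "NVM PFA checksum mismatch, image may be corrupt\n");
	else if (status)
		dev_warn(dev, "NVM checksum verification failed, err %d\n",
			 status);
}
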
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 17afe6acb18a..c01597885629 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9f1f595ae7e6..8271fd651725 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -799,7 +799,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fall-through */
case ICE_SW_LKUP_ETHERTYPE:
- off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
break;
case ICE_SW_LKUP_MAC_VLAN:
@@ -829,7 +829,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
- off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
}
@@ -1973,6 +1973,10 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* ice_add_eth_mac - Add ethertype and MAC based filter rule
* @hw: pointer to the hardware structure
* @em_list: list of ether type MAC filter, MAC is optional
+ *
+ * This function requires the caller to populate the entries in
+ * the filter list with the necessary fields (including flags to
+ * indicate Tx or Rx rules).
*/
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
@@ -1990,7 +1994,6 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
l_type != ICE_SW_LKUP_ETHERTYPE)
return ICE_ERR_PARAM;
- em_list_itr->fltr_info.flag = ICE_FLTR_TX;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
if (em_list_itr->status)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 732b0b9b2e15..cb123fbe30be 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,9 +8,11 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-#define ICE_INVAL_Q_HANDLE 0xFFFF
/* VSI queue context structure */
struct ice_q_ctx {
@@ -69,9 +71,6 @@ struct ice_fltr_info {
/* rule ID returned by firmware once filter rule is created */
u16 fltr_rule_id;
u16 flag;
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
-#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 2364eaf33d23..3c83230434b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -55,7 +55,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return;
- /* Free all the Tx ring sk_bufss */
+ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
@@ -1101,7 +1101,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
* ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
* @port_info: port_info structure containing the current link speed
* @avg_pkt_size: average size of Tx or Rx packets based on clean routine
- * @itr: itr value to update
+ * @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
* in based on wmem_default, SKB overhead, Ethernet overhead, and the current
@@ -1316,7 +1316,7 @@ clear_counts:
*/
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
- /* The itr value is reported in microseconds, and the register value is
+ /* The ITR value is reported in microseconds, and the register value is
* recorded in 2 microsecond units. For this reason we only need to
* shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
* granularity as a shift instead of division. The mask makes sure the
@@ -1645,7 +1645,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
return;
dma_error:
- /* clear dma mappings for failed tx_buf map */
+ /* clear DMA mappings for failed tx_buf map */
for (;;) {
tx_buf = &tx_ring->tx_buf[i];
ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
@@ -1874,10 +1874,10 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
cd_mss = skb_shinfo(skb)->gso_size;
/* record cdesc_qw1 with TSO parameters */
- off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
- (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
- (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
- (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
+ off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
+ (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
first->tx_flags |= ICE_TX_FLAGS_TSO;
return 1;
}
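
The (u64) cast makes the 64-bit width of the descriptor-qword expression explicit. If any operand were a 32-bit type, a shift like the TSO length's would discard its upper bits before the OR into cd_qw1; whether the in-tree locals are already 64-bit is not visible in this hunk, so treat the demo below as the failure mode the cast guards against (shift position assumed from the Tx context descriptor layout):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define QW1_TSO_LEN_S 30	/* assumed TSO-length position in the qword */

int main(void)
{
	uint32_t cd_tso_len = 40000;	/* 0x9c40 */

	/* shift done in 32-bit arithmetic: bits above bit 31 are gone
	 * before the value is widened into the 64-bit qword
	 */
	uint64_t bad = (uint64_t)(cd_tso_len << QW1_TSO_LEN_S);

	/* widen first - what the patched expression guarantees */
	uint64_t good = (uint64_t)cd_tso_len << QW1_TSO_LEN_S;

	printf("bad  = 0x%016" PRIx64 "\n", bad);	/* 0 */
	printf("good = 0x%016" PRIx64 "\n", good);	/* 0x0000271000000000 */
	return 0;
}
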
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 66e05032ee56..ec76aba347b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -58,19 +58,19 @@ struct ice_tx_buf {
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
- DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
+ DEFINE_DMA_UNMAP_ADDR(dma);
};
struct ice_tx_offload_params {
- u8 header_len;
+ u64 cd_qw1;
+ struct ice_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
- u16 cd_l2tag2;
u32 cd_tunnel_params;
- u64 cd_qw1;
- struct ice_ring *tx_ring;
+ u16 cd_l2tag2;
+ u8 header_len;
};
struct ice_rx_buf {
@@ -150,6 +150,7 @@ enum ice_rx_dtype {
/* descriptor ring, associated with a VSI */
struct ice_ring {
+ /* CL1 - 1st cacheline starts here */
struct ice_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
@@ -161,11 +162,11 @@ struct ice_ring {
struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
};
+ /* CL2 - 2nd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u32 txq_teid; /* Added Tx queue TEID */
-#ifdef CONFIG_DCB
- u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
+ u16 q_handle; /* Queue handle per TC */
+
+ u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,8 +174,7 @@ struct ice_ring {
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- u8 ring_active; /* is ring online or not */
+ u16 next_to_alloc;
/* stats structs */
struct ice_q_stats stats;
@@ -184,10 +184,17 @@ struct ice_ring {
struct ice_rxq_stats rx_stats;
};
- unsigned int size; /* length of descriptor ring in bytes */
- dma_addr_t dma; /* physical address of ring */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
+ /* CLX - the below items are only accessed infrequently and should be
+ * in their own cache line if possible
+ */
+ dma_addr_t dma; /* physical address of ring */
+ unsigned int size; /* length of descriptor ring in bytes */
+ u32 txq_teid; /* Added Tx queue TEID */
+ u16 rx_buf_len;
+#ifdef CONFIG_DCB
+ u8 dcb_tc; /* Traffic class of ring */
+#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
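
The CL1/CL2/CLX comments encode intent, not enforcement. Where the split matters it can be pinned at build time; a toy illustration (assuming 64-byte cachelines and 64-bit pointers; the driver itself relies on review, or tools such as pahole, rather than asserts like these):

#include <stddef.h>

struct toy_ring {
	/* CL1: pointers touched for every descriptor */
	void *next, *desc, *dev, *netdev, *vsi, *q_vector, *tail, *buf;
	/* CL2: indices used in the hot path */
	unsigned short q_index, count, reg_idx;
	/* CLX: cold, init-time only */
	unsigned long long dma;
};

_Static_assert(offsetof(struct toy_ring, q_index) >= 64,
	       "hot pointers should fill cacheline 1 exactly");

int main(void) { return 0; }
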
struct ice_ring_container {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a862af4cbf78..24bbef8bbe69 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -23,6 +23,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
+#define ICE_DBG_FW_LOG BIT_ULL(3)
#define ICE_DBG_LINK BIT_ULL(4)
#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
@@ -61,6 +62,13 @@ enum ice_fc_mode {
ICE_FC_DFLT
};
+enum ice_fec_mode {
+ ICE_FEC_NONE = 0,
+ ICE_FEC_RS,
+ ICE_FEC_BASER,
+ ICE_FEC_AUTO
+};
+
enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET,
@@ -86,12 +94,14 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF,
+ ICE_VSI_LB = 6,
};
struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low;
u64 phy_type_high;
+ u8 topo_media_conflict;
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
@@ -99,6 +109,7 @@ struct ice_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 fec_info;
u8 pacing;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
@@ -423,7 +434,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the itr/intrl granularity during
+ * register. Used for determining the ITR/intrl granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a805cbdd69be..5d24b539648f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -103,7 +103,7 @@ ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
u16 link_speed;
if (link_up)
- link_speed = ICE_AQ_LINK_SPEED_40GB;
+ link_speed = ICE_AQ_LINK_SPEED_100GB;
else
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -141,32 +141,20 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
- * ice_get_vf_vector - get VF interrupt vector register offset
- * @vf_msix: number of MSIx vector per VF on a PF
- * @vf_id: VF identifier
- * @i: index of MSIx vector
- */
-static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
-{
- return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
- VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
-}
-
-/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
static void ice_free_vf_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
- int i, pf_vf_msix;
+ int i, last_vector_idx;
/* First, disable VF's configuration API to prevent OS from
* accessing the VF's VSI after it's freed or invalidated.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* free vsi & disconnect it from the parent uplink */
+ /* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx) {
ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
@@ -174,13 +162,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- pf_vf_msix = pf->num_vf_msix;
+ last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
/* Disable interrupts so that VF starts in a known state */
- for (i = 0; i < pf_vf_msix; i++) {
- u32 reg_idx;
-
- reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
- wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
ice_flush(&pf->hw);
}
/* reset some of the state variables keeping track of the resources */
@@ -205,8 +190,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
- first = vf->first_vector_idx +
- hw->func_caps.common_cap.msix_vector_first_id;
+ first = vf->first_vector_idx;
last = first + pf->num_vf_msix - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -232,6 +216,42 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
+ *
+ * If MSIX entries from the pf->irq_tracker were needed then we need to
+ * reset the irq_tracker->end and give back the entries we needed to
+ * num_avail_sw_msix.
+ *
+ * If no MSIX entries were taken from the pf->irq_tracker then just clear
+ * the pf->sriov_base_vector.
+ *
+ * Returns 0 on success, and -EINVAL on error.
+ */
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
+{
+ struct ice_res_tracker *res;
+
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* give back irq_tracker resources used */
+ if (pf->sriov_base_vector < res->num_entries) {
+ res->end = res->num_entries;
+ pf->num_avail_sw_msix +=
+ res->num_entries - pf->sriov_base_vector;
+ }
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -246,15 +266,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vsi *vsi;
@@ -270,6 +281,15 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
@@ -288,6 +308,10 @@ void ice_free_vfs(struct ice_pf *pf)
}
}
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(&pf->pdev->dev,
+ "Failed to free MSIX resources used by SR-IOV\n");
+
devm_kfree(&pf->pdev->dev, pf->vf);
pf->vf = NULL;
@@ -457,6 +481,22 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
+ * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
+ *
+ * This returns the first MSIX vector index in HW that is used by this VF and
+ * this will always be the OICR index in the AVF driver so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->hw.func_caps.common_cap.msix_vector_first_id +
+ pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+}
+
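
The arithmetic is easiest to sanity-check with numbers. A worked example (all values made up; msix_vector_first_id is assumed to be 0, and the first vector of each block is the VF's OICR per the comment above):

#include <stdio.h>

int main(void)
{
	int msix_first_id = 0;			/* assumed; from HW caps */
	int sriov_base_vector = 1024 - 64;	/* = 960 */
	int num_vf_msix = 5;

	for (int vf_id = 0; vf_id < 3; vf_id++) {
		int first = msix_first_id + sriov_base_vector +
			    vf_id * num_vf_msix;

		printf("VF %d: OICR at %d, queue vectors %d..%d\n",
		       vf_id, first, first + 1, first + num_vf_msix - 1);
	}
	return 0;
}
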
+/**
* ice_alloc_vsi_res - Setup VF VSI and its resources
* @vf: pointer to the VF structure
*
@@ -470,8 +510,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int status = 0;
- vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+ /* first vector index is the VFs OICR index */
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
return -ENOMEM;
@@ -480,14 +522,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_num = vsi->vsi_num;
- /* first vector index is the VFs OICR index */
- vf->first_vector_idx = vsi->hw_base_vector;
- /* Since hw_base_vector holds the vector where data queue interrupts
- * starts, increment by 1 since VFs allocated vectors include OICR intr
- * as well.
- */
- vsi->hw_base_vector += 1;
-
/* Check if port VLAN exist before, and restore it accordingly */
if (vf->port_vlan_id) {
ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
@@ -580,8 +614,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx +
- hw->func_caps.common_cap.msix_vector_first_id;
+ first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
@@ -687,6 +720,97 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
}
/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
+ *
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates the
+ * res->end and this is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
+ *
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so in many cases the irq_tracker will not
+ * be needed. In these cases we just set the pf->sriov_base_vector and return
+ * success.
+ *
+ * If SR-IOV needs to use any pf->irq_tracker entries it updates the
+ * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
+ * so any calls to ice_get_res() using the irq_tracker will not try to use
+ * resources at or beyond the newly set value.
+ *
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
+ */
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 pf_total_msix_vectors =
+ pf->hw.func_caps.common_cap.num_msix_vectors;
+ struct ice_res_tracker *res = pf->irq_tracker;
+ int sriov_base_vector;
+
+ if (max_valid_res_idx < 0)
+ return max_valid_res_idx;
+
+ sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector <= max_valid_res_idx)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ /* dip into irq_tracker entries and update used resources */
+ if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
+ pf->num_avail_sw_msix -=
+ res->num_entries - pf->sriov_base_vector;
+ res->end = pf->sriov_base_vector;
+ }
+
+ return 0;
+}
+
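
Whether the irq_tracker is touched depends on how far the SR-IOV block reaches down into the OS-granted vectors. Two worked cases (numbers made up; the real function additionally fails with -EINVAL when the new base would collide with an already-allocated tracker entry):

#include <stdio.h>

int main(void)
{
	int total = 1024;		/* func_caps num_msix_vectors */
	int tracked = 256;		/* irq_tracker num_entries */
	int cases[] = { 64, 800 };

	for (int i = 0; i < 2; i++) {
		int needed = cases[i];
		int base = total - needed;	/* sriov_base_vector */

		if (needed > total - tracked)
			printf("need %3d: base=%3d, end %d -> %d, avail -= %d\n",
			       needed, base, tracked, base, tracked - base);
		else
			printf("need %3d: base=%3d, tracker untouched\n",
			       needed, base);
	}
	return 0;
}
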
+/**
* ice_check_avail_res - check if vectors and queues are available
* @pf: pointer to the PF structure
*
@@ -696,11 +820,16 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
*/
static int ice_check_avail_res(struct ice_pf *pf)
{
- u16 num_msix, num_txq, num_rxq;
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix, num_txq, num_rxq, num_avail_msix;
- if (!pf->num_alloc_vfs)
+ if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
+ /* add 1 to max_valid_res_idx to account for it being 0-based */
+ num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
+ (max_valid_res_idx + 1);
+
/* Grab from HW interrupts common pool
* Note: By the time the user decides it needs more vectors in a VF
* its already too late since one must decide this prior to creating the
@@ -717,11 +846,11 @@ static int ice_check_avail_res(struct ice_pf *pf)
* grab default interrupt vectors (5 as supported by AVF driver).
*/
if (pf->num_alloc_vfs <= 16) {
- num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ num_msix = ice_determine_res(pf, num_avail_msix,
ICE_MAX_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
- num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ num_msix = ice_determine_res(pf, num_avail_msix,
ICE_DFLT_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else {
@@ -750,6 +879,9 @@ static int ice_check_avail_res(struct ice_pf *pf)
if (!num_txq || !num_rxq)
return -EIO;
+ if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
+ return -EINVAL;
+
/* since AVF driver works with only queue pairs which means, it expects
* to have equal number of Rx and Tx queues, so take the minimum of
* available Tx or Rx queues
@@ -938,6 +1070,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf->num_vf_qs = 0;
}
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(&pf->pdev->dev,
+ "Failed to free MSIX resources used by SR-IOV\n");
+
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
"Cannot allocate VF resources, try with fewer number of VFs\n");
@@ -1119,7 +1255,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
int i, ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
ice_flush(hw);
@@ -1134,7 +1270,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
- goto err_unroll_sriov;
+ goto err_pci_disable_sriov;
}
pf->vf = vfs;
@@ -1154,12 +1290,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, true))
+ if (!ice_reset_all_vfs(pf, true)) {
+ ret = -EIO;
goto err_unroll_sriov;
+ }
goto err_unroll_intr;
err_unroll_sriov:
+ pf->vf = NULL;
+ devm_kfree(&pf->pdev->dev, vfs);
+ vfs = NULL;
+ pf->num_alloc_vfs = 0;
+err_pci_disable_sriov:
pci_disable_sriov(pf->pdev);
err_unroll_intr:
/* rearm interrupts here */
@@ -1168,8 +1311,8 @@ err_unroll_intr:
}
/**
- * ice_pf_state_is_nominal - checks the pf for nominal state
- * @pf: pointer to pf to check
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
*
* Check the PF's state for a collection of bits that would indicate
* the PF is in a state that would inhibit normal operation for
@@ -1496,7 +1639,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
- * @pf: the pf structure to search for the VSI
+ * @pf: the PF structure to search for the VSI
* @id: ID of the VSI it is searching for
*
* searches for the VSI with the given ID
@@ -1807,28 +1950,37 @@ error_param:
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_irq_map_info *irqmap_info =
- (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_irq_map_info *irqmap_info;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
- struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
+ u16 num_q_vectors_mapped;
+ struct ice_vsi *vsi;
unsigned long qmap;
- u16 num_q_vectors;
int i;
- num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !vsi || vsi->num_q_vectors < num_q_vectors ||
- irqmap_info->num_vectors == 0) {
+ pf->num_vf_msix < num_q_vectors_mapped ||
+ !irqmap_info->num_vectors) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- for (i = 0; i < num_q_vectors; i++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
map = &irqmap_info->vecmap[i];
@@ -1836,7 +1988,21 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vsi_id = map->vsi_id;
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
- .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the non-queue vector count from the vector_id passed by
+ * the VF to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1852,6 +2018,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
}
qmap = map->txq_map;
@@ -1864,11 +2032,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
}
}
- if (vsi)
- ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
@@ -1903,9 +2071,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (!vsi)
goto error_param;
- }
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 3725aea16840..c3ca522c245a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -49,29 +49,34 @@ struct ice_vf {
struct ice_pf *pf;
s16 vf_id; /* VF ID in the PF space */
- u32 driver_caps; /* reported by VF driver */
+ u16 lan_vsi_idx; /* index into PF struct */
int first_vector_idx; /* first vector index of this VF */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
- u8 pf_set_mac; /* VF MAC address set by VMM admin */
- u8 trusted;
- u16 lan_vsi_idx; /* index into PF struct */
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
- u8 link_forced;
- u8 link_up; /* only valid if VF link is forced */
- u8 spoofchk;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
- u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
@@ -96,6 +101,8 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -161,5 +168,11 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
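Note: the reworked ice_vc_cfg_irq_map_msg() above validates each VF-supplied vector_id and only indexes the VSI queue-vector array after skipping the miscellaneous (non-queue) vector. A minimal sketch of that index math follows; the helper name is hypothetical and ICE_NONQ_VECS_VF is assumed to be 1, as the "rogue vector" handling suggests.

    #include <stddef.h>

    #define ICE_NONQ_VECS_VF 1      /* vector 0 is the VF's misc vector (assumed) */

    struct q_vector { int id; };

    /* Hypothetical helper mirroring the lookup above: vector_id 0 is never
     * mapped to a queue vector; IDs >= 1 index the VSI queue-vector array
     * after subtracting the non-queue vector count.
     */
    static struct q_vector *vf_vec_to_q_vector(struct q_vector **q_vectors,
                                               size_t num_q_vectors,
                                               unsigned int vector_id)
    {
            if (vector_id < ICE_NONQ_VECS_VF)       /* misc vector: nothing to map */
                    return NULL;
            if (vector_id - ICE_NONQ_VECS_VF >= num_q_vectors)
                    return NULL;                    /* out of range: reject */
            return q_vectors[vector_id - ICE_NONQ_VECS_VF];
    }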
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index bafdcf70a353..3ec2ce0725d5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
dev_spec->sgmii_active = true;
break;
}
- /* fall through for I2C based SGMII */
+ /* fall through - for I2C based SGMII */
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
/* read media type from SFP EEPROM */
ret_val = igb_set_sfp_media_type_82575(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 39f33afc479c..fc925adbd9fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -753,6 +753,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
struct net_device *netdev = igb->netdev;
hw->hw_addr = NULL;
netdev_err(netdev, "PCIe link lost\n");
+ WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
}
return value;
@@ -6695,7 +6696,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
igb_setup_dca(adapter);
break;
}
- /* Fall Through since DCA is disabled. */
+ /* Fall Through - since DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
/* without this a class_device is left
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 51a8b8769c67..59258d791106 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -10,50 +10,6 @@
#include "igc.h"
/**
- * igc_set_pcie_completion_timeout - set pci-e completion timeout
- * @hw: pointer to the HW structure
- */
-static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
-{
- u32 gcr = rd32(IGC_GCR);
- u16 pcie_devctl2;
- s32 ret_val = 0;
-
- /* only take action if timeout value is defaulted to 0 */
- if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
- goto out;
-
- /* if capabilities version is type 1 we can write the
- * timeout of 10ms to 200ms through the GCR register
- */
- if (!(gcr & IGC_GCR_CAP_VER2)) {
- gcr |= IGC_GCR_CMPL_TMOUT_10ms;
- goto out;
- }
-
- /* for version 2 capabilities we need to write the config space
- * directly in order to set the completion timeout value for
- * 16ms to 55ms
- */
- ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
- if (ret_val)
- goto out;
-
- pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
-
- ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
-out:
- /* disable completion timeout resend */
- gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
-
- wr32(IGC_GCR, gcr);
-
- return ret_val;
-}
-
-/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
@@ -72,11 +28,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
if (ret_val)
hw_dbg("PCI-E Master disable polling has failed.\n");
- /* set the completion timeout for interface */
- ret_val = igc_set_pcie_completion_timeout(hw);
- if (ret_val)
- hw_dbg("PCI-E Set completion timeout has failed.\n");
-
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a9a30268de59..fc0ccfe38a20 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -5,8 +5,8 @@
#define _IGC_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
@@ -29,12 +29,6 @@
/* Status of Master requests. */
#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
-/* PCI Express Control */
-#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000
-#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define IGC_GCR_CAP_VER2 0x00040000
-
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
@@ -72,6 +66,9 @@
#define IGC_CONNSW_AUTOSENSE_EN 0x1
+/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
+#define MAX_JUMBO_FRAME_SIZE 0x2600
+
/* PBA constants */
#define IGC_PBA_34K 0x0022
@@ -264,9 +261,6 @@
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
-#define IGC_CT_SHIFT 4
-#define IGC_COLLISION_THRESHOLD 15
-
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
@@ -398,7 +392,7 @@
#define IGC_MDIC_ERROR 0x40000000
#define IGC_MDIC_DEST 0x80000000
-#define IGC_N0_QUEUE -1
+#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
#define IGC_MAX_NETWORK_HDR_LEN 511
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 7c88b7bd4799..1039a224ac80 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -114,11 +114,8 @@ struct igc_nvm_operations {
struct igc_phy_operations {
s32 (*acquire)(struct igc_hw *hw);
- s32 (*check_polarity)(struct igc_hw *hw);
s32 (*check_reset_block)(struct igc_hw *hw);
s32 (*force_speed_duplex)(struct igc_hw *hw);
- s32 (*get_cfg_done)(struct igc_hw *hw);
- s32 (*get_cable_length)(struct igc_hw *hw);
s32 (*get_phy_info)(struct igc_hw *hw);
s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
void (*release)(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index f7683d3ae47c..ba4646737288 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -8,7 +8,6 @@
#include "igc_hw.h"
/* forward declaration */
-static s32 igc_set_default_fc(struct igc_hw *hw);
static s32 igc_set_fc_watermarks(struct igc_hw *hw);
/**
@@ -96,13 +95,10 @@ s32 igc_setup_link(struct igc_hw *hw)
goto out;
/* If requested flow control is set to default, set flow control
- * based on the EEPROM flow control settings.
+ * to enable both 'rx' and 'tx' pause frames.
*/
- if (hw->fc.requested_mode == igc_fc_default) {
- ret_val = igc_set_default_fc(hw);
- if (ret_val)
- goto out;
- }
+ if (hw->fc.requested_mode == igc_fc_default)
+ hw->fc.requested_mode = igc_fc_full;
/* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
@@ -136,19 +132,6 @@ out:
}
/**
- * igc_set_default_fc - Set flow control default values
- * @hw: pointer to the HW structure
- *
- * Read the EEPROM for the default values for flow control and store the
- * values.
- */
-static s32 igc_set_default_fc(struct igc_hw *hw)
-{
- hw->fc.requested_mode = igc_fc_full;
- return 0;
-}
-
-/**
* igc_force_mac_fc - Force the MAC's flow control settings
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34fa0e60a780..93f3b4e6185b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -72,6 +72,27 @@ void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct igc_fc_info *fc = &hw->fc;
+ u32 pba, hwm;
+
+ /* Repartition PBA for greater than 9k MTU if required */
+ pba = IGC_PBA_34K;
+
+ /* flow control settings
+ * The high water mark must be low enough to fit one full frame
+ * after transmitting the pause frame. As such we must have enough
+ * space to allow for us to complete our current transmit and then
+ * receive the frame that is in progress from the link partner.
+ * Set it to:
+ * - the full Rx FIFO size minus one full Tx plus one full Rx frame
+ */
+ hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
+
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ fc->pause_time = 0xFFFF;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
hw->mac.ops.reset_hw(hw);
@@ -3934,6 +3955,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
hw->hw_addr = NULL;
netif_device_detach(netdev);
netdev_err(netdev, "PCIe link lost, device now detached\n");
+ WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
}
return value;
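Note: the flow-control block added to igc_reset() above sizes the high water mark from the packet buffer: the full Rx FIFO minus one full Tx plus one full Rx frame, rounded down to 16-byte granularity. A worked example of that arithmetic; the 1522-byte max frame size is an assumption for illustration, not taken from the hunk.

    #include <stdint.h>
    #include <stdio.h>

    #define IGC_PBA_34K             0x0022  /* packet buffer size, in KB units */
    #define MAX_JUMBO_FRAME_SIZE    0x2600  /* 9728 bytes */

    int main(void)
    {
            uint32_t pba = IGC_PBA_34K;
            uint32_t max_frame_size = 1522; /* assumed standard-MTU frame */
            uint32_t hwm, high_water, low_water;

            /* full Rx FIFO size minus one full Tx plus one full Rx frame */
            hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

            high_water = hwm & 0xFFFFFFF0;  /* round down to 16B granularity */
            low_water = high_water - 16;

            /* with these inputs: hwm=23566, high=23552, low=23536 */
            printf("hwm=%u high=%u low=%u\n", hwm, high_water, low_water);
            return 0;
    }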
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 08d85e336bd4..39e73ad60352 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -50,8 +50,6 @@
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
-#define IXGBE_ETH_P_LLDP 0x88CC
-
/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
@@ -635,6 +633,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -774,11 +773,6 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
-
- /* AF_XDP zero-copy */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1039,4 +1033,10 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */
+
+static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
+{
+ return !!adapter->xdp_prog;
+}
+
#endif /* _IXGBE_H_ */
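Note: the ixgbe changes above drop the driver-private xsk_umems array in favor of a bitmap of AF_XDP zero-copy queues, leaving umem lookup to the core's xdp_get_umem_from_qid(). For reference, the bitmap helpers used here behave roughly like these userspace stand-ins (non-atomic, unlike the kernel's set_bit()/clear_bit()):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* allocate a zeroed bitmap large enough for nbits */
    static unsigned long *bitmap_zalloc(unsigned int nbits)
    {
            return calloc((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG,
                          sizeof(unsigned long));
    }

    static void set_bit(unsigned int nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static void clear_bit(unsigned int nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }

    static bool test_bit(unsigned int nr, const unsigned long *map)
    {
            return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
    }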
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 57fd9ee6de66..b613e72c8ee4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6288,6 +6288,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
+ adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ if (!adapter->af_xdp_zc_qps)
+ return -ENOMEM;
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -11161,6 +11165,7 @@ err_sw_init:
kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
kfree(adapter->rss_key);
+ bitmap_free(adapter->af_xdp_zc_qps);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -11249,6 +11254,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->mac_table);
kfree(adapter->rss_key);
+ bitmap_free(adapter->af_xdp_zc_qps);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d81a50dc9535..2c4d327fcc2e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -72,13 +72,13 @@
#define IXGBE_INCPER_SHIFT_82599 24
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
-#define IXGBE_PTP_TX_TIMEOUT (HZ * 15)
+#define IXGBE_PTP_TX_TIMEOUT (HZ)
-/* half of a one second clock period, for use with PPS signal. We have to use
- * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in
- * order to force at least 64bits of precision for shifting
+/* We use our own definitions instead of NSEC_PER_SEC because we want to mark
+ * the value as a ULL to force precision when bit shifting.
*/
-#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL
+#define NS_PER_SEC 1000000000ULL
+#define NS_PER_HALF_SEC 500000000ULL
/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL
* which contain measurements of seconds and nanoseconds respectively. This
@@ -141,23 +141,26 @@
#define MAX_TIMADJ 0x7FFFFFFF
/**
- * ixgbe_ptp_setup_sdp_x540
+ * ixgbe_ptp_setup_sdp_X540
* @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
- * the X540 device. It will create a 1second periodic output that can
+ * the X540 device. It will create a 1 second periodic output that can
* be used as the PPS (via an interrupt).
*
- * It calculates when the systime will be on an exact second, and then
- * aligns the start of the PPS signal to that value. The shift is
- * necessary because it can change based on the link speed.
+ * It calculates when the system time will be on an exact second, and then
+ * aligns the start of the PPS signal to that value.
+ *
+ * This works by using the cycle counter shift and mult values in reverse, and
+ * assumes that the values we're shifting will not overflow.
*/
-static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
+static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter)
{
+ struct cyclecounter *cc = &adapter->hw_cc;
struct ixgbe_hw *hw = &adapter->hw;
- int shift = adapter->hw_cc.shift;
u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
- u64 ns = 0, clock_edge = 0;
+ u64 ns = 0, clock_edge = 0, clock_period;
+ unsigned long flags;
/* disable the pin first */
IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
@@ -177,26 +180,36 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
/* enable the Clock Out feature on SDP0, and allow
* interrupts to occur when the pin changes
*/
- tsauxc = IXGBE_TSAUXC_EN_CLK |
- IXGBE_TSAUXC_SYNCLK |
- IXGBE_TSAUXC_SDP0_INT;
+ tsauxc = (IXGBE_TSAUXC_EN_CLK |
+ IXGBE_TSAUXC_SYNCLK |
+ IXGBE_TSAUXC_SDP0_INT);
- /* clock period (or pulse length) */
- clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift);
- clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32);
-
- /* Account for the cyclecounter wrap-around value by
- * using the converted ns value of the current time to
- * check for when the next aligned second would occur.
+ /* Determine the clock time period to use. This assumes that the
+ * cycle counter shift is small enough to avoid overflow.
*/
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
- ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge);
+ clock_period = div_u64((NS_PER_HALF_SEC << cc->shift), cc->mult);
+ clktiml = (u32)(clock_period);
+ clktimh = (u32)(clock_period >> 32);
- div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem);
- clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift);
+ /* Read the current clock time, and save the cycle counter value */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->hw_tc);
+ clock_edge = adapter->hw_tc.cycle_last;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (NS_PER_SEC - rem);
- /* specify the initial clock start time */
+ /* Adjust the clock edge to align with the next full second. This
+ * assumes that the cycle counter shift is small enough to avoid
+ * overflowing when shifting the remainder.
+ */
+ clock_edge += div_u64((rem << cc->shift), cc->mult);
trgttiml = (u32)clock_edge;
trgttimh = (u32)(clock_edge >> 32);
@@ -212,8 +225,103 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_setup_sdp_X550
+ * @adapter: private adapter structure
+ *
+ * Enable or disable a clock output signal on SDP 0 for X550 hardware.
+ *
+ * Use the target time feature to align the output signal on the next full
+ * second.
+ *
+ * This works by using the cycle counter shift and mult values in reverse, and
+ * assumes that the values we're shifting will not overflow.
+ */
+static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
+{
+ u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp;
+ struct cyclecounter *cc = &adapter->hw_cc;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 ns = 0, clock_edge = 0;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ /* disable the pin first */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+ IXGBE_WRITE_FLUSH(hw);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
+ return;
+
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* enable the SDP0 pin as output, and connected to the
+ * native function for Timesync (ClockOut)
+ */
+ esdp |= IXGBE_ESDP_SDP0_DIR |
+ IXGBE_ESDP_SDP0_NATIVE;
+
+ /* enable the Clock Out feature on SDP0, and use Target Time 0 to
+ * enable generation of interrupts on the clock change.
+ */
+#define IXGBE_TSAUXC_DIS_TS_CLEAR 0x40000000
+ tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_ST0 |
+ IXGBE_TSAUXC_EN_TT0 | IXGBE_TSAUXC_SDP0_INT |
+ IXGBE_TSAUXC_DIS_TS_CLEAR);
+
+ tssdp = (IXGBE_TSSDP_TS_SDP0_EN |
+ IXGBE_TSSDP_TS_SDP0_CLK0);
+
+ /* Determine the clock time period to use. This assumes that the
+ * cycle counter shift is small enough to avoid overflowing a 32bit
+ * value.
+ */
+ freqout = div_u64(NS_PER_HALF_SEC << cc->shift, cc->mult);
+
+ /* Read the current clock time, and save the cycle counter value */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->hw_tc);
+ clock_edge = adapter->hw_tc.cycle_last;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. This
+ * assumes that the cycle counter shift is small enough to avoid
+ * overflowing when shifting the remainder.
+ */
+ clock_edge += div_u64((rem << cc->shift), cc->mult);
+
+ /* X550 hardware stores the time in 32bits of 'billions of cycles' and
+ * 32bits of 'cycles'. There's no guarantee that cycles represents
+ * nanoseconds. However, we can use the math from a timespec64 to
+ * convert into the hardware representation.
+ *
+ * See ixgbe_ptp_read_X550() for more details.
+ */
+ ts = ns_to_timespec64(clock_edge);
+ trgttiml = (u32)ts.tv_nsec;
+ trgttimh = (u32)ts.tv_sec;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FREQOUT0, freqout);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
+
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_REG(hw, IXGBE_TSSDP, tssdp);
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_read_X550 - read cycle counter value
- * @hw_cc: cyclecounter structure
+ * @cc: cyclecounter structure
*
* This function reads SYSTIME registers. It is called by the cyclecounter
* structure to convert from internal representation into nanoseconds. We need
@@ -221,10 +329,10 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
+static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
- container_of(hw_cc, struct ixgbe_adapter, hw_cc);
+ container_of(cc, struct ixgbe_adapter, hw_cc);
struct ixgbe_hw *hw = &adapter->hw;
struct timespec64 ts;
@@ -838,6 +946,15 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector,
ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
+/**
+ * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration
+ * @adapter: pointer to adapter structure
+ * @ifr: ioctl data
+ *
+ * This function returns the current timestamping settings. Rather than
+ * attempt to deconstruct registers to fill in the values, simply keep a copy
+ * of the old settings around, and return a copy when requested.
+ */
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
{
struct hwtstamp_config *config = &adapter->tstamp_config;
@@ -1253,7 +1370,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
- adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
+ adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name,
@@ -1280,13 +1397,13 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_alarm = 0;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
- adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
- adapter->ptp_setup_sdp = NULL;
+ adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X550;
break;
default:
adapter->ptp_clock = NULL;
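Note: both SDP setup paths above run the cyclecounter conversion in reverse: half a second of nanoseconds becomes a device-cycle pulse period, and the remainder to the next full second becomes the offset of the first edge. A sketch of that arithmetic, under the same no-overflow assumption the driver comments state; function names are illustrative only.

    #include <stdint.h>

    #define NS_PER_SEC      1000000000ULL
    #define NS_PER_HALF_SEC  500000000ULL

    /* ns -> device cycles, inverting the cyclecounter's cyc2ns conversion:
     * cycles = (ns << shift) / mult. Assumes shift is small enough that
     * the shift does not overflow 64 bits.
     */
    static uint64_t ns_to_cycles(uint64_t ns, uint32_t mult, uint32_t shift)
    {
            return (ns << shift) / mult;
    }

    /* Pulse period and first aligned edge, mirroring the setup_sdp paths
     * above. 'now_ns' and 'cycle_last' stand in for timecounter_read()
     * and hw_tc.cycle_last read under tmreg_lock.
     */
    static void pps_align(uint64_t now_ns, uint64_t cycle_last,
                          uint32_t mult, uint32_t shift,
                          uint64_t *period_cycles, uint64_t *first_edge)
    {
            uint64_t rem = now_ns % NS_PER_SEC; /* how far past the second */

            *period_cycles = ns_to_cycles(NS_PER_HALF_SEC, mult, shift);
            *first_edge = cycle_last +
                          ns_to_cycles(NS_PER_SEC - rem, mult, shift);
    }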
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 345701af7749..537dfff585e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1645,7 +1645,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
(IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
+ ETH_P_LLDP));
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
(IXGBE_ETQF_FILTER_EN |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 84f2dba39e36..2be1c4c72435 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1067,6 +1067,7 @@ struct ixgbe_nvm_version {
#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
/* Diagnostic Registers */
#define IXGBE_RDSTATCTL 0x02C20
@@ -2240,11 +2241,18 @@ enum {
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
-#define IXGBE_TSAUXC_EN_CLK 0x00000004
-#define IXGBE_TSAUXC_SYNCLK 0x00000008
-#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_CLK 0x00000004
+#define IXGBE_TSAUXC_SYNCLK 0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_TT0 0x00000001
+#define IXGBE_TSAUXC_EN_TT1 0x00000002
+#define IXGBE_TSAUXC_ST0 0x00000010
#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100
+
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index bfe95ce0bd7f..6af55bb3bef3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -14,57 +14,10 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
- if (!adapter->xsk_umems || !adapter->xsk_umems[qid] ||
- qid >= adapter->num_xsk_umems || !xdp_on)
+ if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return adapter->xsk_umems[qid];
-}
-
-static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
-{
- if (adapter->xsk_umems)
- return 0;
-
- adapter->num_xsk_umems_used = 0;
- adapter->num_xsk_umems = adapter->num_rx_queues;
- adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
- sizeof(*adapter->xsk_umems),
- GFP_KERNEL);
- if (!adapter->xsk_umems) {
- adapter->num_xsk_umems = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
- u16 qid)
-{
- int err;
-
- err = ixgbe_alloc_xsk_umems(adapter);
- if (err)
- return err;
-
- adapter->xsk_umems[qid] = umem;
- adapter->num_xsk_umems_used++;
-
- return 0;
-}
-
-static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
-{
- adapter->xsk_umems[qid] = NULL;
- adapter->num_xsk_umems_used--;
-
- if (adapter->num_xsk_umems == 0) {
- kfree(adapter->xsk_umems);
- adapter->xsk_umems = NULL;
- adapter->num_xsk_umems = 0;
- }
+ return xdp_get_umem_from_qid(adapter->netdev, qid);
}
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
@@ -113,6 +66,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
struct xdp_umem *umem,
u16 qid)
{
+ struct net_device *netdev = adapter->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -120,12 +74,9 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
if (qid >= adapter->num_rx_queues)
return -EINVAL;
- if (adapter->xsk_umems) {
- if (qid >= adapter->num_xsk_umems)
- return -EINVAL;
- if (adapter->xsk_umems[qid])
- return -EBUSY;
- }
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
if (!reuseq)
@@ -138,14 +89,12 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return err;
if_running = netif_running(adapter->netdev) &&
- READ_ONCE(adapter->xdp_prog);
+ ixgbe_enabled_xdp_adapter(adapter);
if (if_running)
ixgbe_txrx_ring_disable(adapter, qid);
- err = ixgbe_add_xsk_umem(adapter, umem, qid);
- if (err)
- return err;
+ set_bit(qid, adapter->af_xdp_zc_qps);
if (if_running) {
ixgbe_txrx_ring_enable(adapter, qid);
@@ -161,20 +110,21 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
+ struct xdp_umem *umem;
bool if_running;
- if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
- !adapter->xsk_umems[qid])
+ umem = xdp_get_umem_from_qid(adapter->netdev, qid);
+ if (!umem)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
- READ_ONCE(adapter->xdp_prog);
+ ixgbe_enabled_xdp_adapter(adapter);
if (if_running)
ixgbe_txrx_ring_disable(adapter, qid);
- ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
- ixgbe_remove_xsk_umem(adapter, qid);
+ clear_bit(qid, adapter->af_xdp_zc_qps);
+ ixgbe_xsk_umem_dma_unmap(adapter, umem);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -640,6 +590,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = len;
tx_bi->xdpf = NULL;
+ tx_bi->gso_segs = 1;
tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -704,7 +655,6 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
xsk_frames++;
tx_bi->xdpf = NULL;
- total_bytes += tx_bi->bytecount;
tx_bi++;
tx_desc++;
@@ -753,7 +703,7 @@ int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
if (qid >= adapter->num_xdp_queues)
return -ENXIO;
- if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+ if (!adapter->xdp_ring[qid]->xsk_umem)
return -ENXIO;
ring = adapter->xdp_ring[qid];
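Note: the umem enable/disable paths above share a quiesce pattern: if the interface is running with XDP, the affected ring pair is disabled, the zero-copy state is flipped, and the ring is re-enabled. A schematic of that sequence; the helper names are stand-ins for the ixgbe ring control functions, and qid is assumed to fit in a single unsigned long word.

    #include <stdbool.h>

    /* stand-ins for ixgbe_txrx_ring_{disable,enable}() */
    static void ring_disable(int qid) { (void)qid; }
    static void ring_enable(int qid)  { (void)qid; }

    /* Flip a queue's zero-copy bit while the ring pair is quiesced,
     * mirroring ixgbe_xsk_umem_enable()/_disable() above.
     */
    static void zc_set(unsigned long *zc_qps, int qid, bool on,
                       bool if_running)
    {
            if (if_running)
                    ring_disable(qid);      /* stop Tx/Rx on this queue pair */

            if (on)
                    *zc_qps |= 1UL << qid;          /* set_bit() equivalent */
            else
                    *zc_qps &= ~(1UL << qid);       /* clear_bit() equivalent */

            if (if_running)
                    ring_enable(qid);       /* restart with the new ZC state */
    }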
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index cd3b81300cc7..d5ce49636548 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -508,9 +508,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
-
- return 0;
+ return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ IXGBE_VFMAILBOX_SIZE);
}
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 269bd73be1a0..94dc0a272644 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -437,6 +437,7 @@ struct mvneta_port {
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
+ struct phylink_config phylink_config;
struct phy *comphy;
struct mvneta_bm *bm_priv;
@@ -3356,9 +3357,11 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+static void mvneta_validate(struct phylink_config *config,
+ unsigned long *supported,
struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -3408,9 +3411,10 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_helper_basex_speed(state);
}
-static int mvneta_mac_link_state(struct net_device *ndev,
+static int mvneta_mac_link_state(struct phylink_config *config,
struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 gmac_stat;
@@ -3438,8 +3442,9 @@ static int mvneta_mac_link_state(struct net_device *ndev,
return 1;
}
-static void mvneta_mac_an_restart(struct net_device *ndev)
+static void mvneta_mac_an_restart(struct phylink_config *config)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
@@ -3449,9 +3454,10 @@ static void mvneta_mac_an_restart(struct net_device *ndev)
gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
-static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
- const struct phylink_link_state *state)
+static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
@@ -3581,9 +3587,10 @@ static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}
-static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
- phy_interface_t interface)
+static void mvneta_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 val;
@@ -3600,10 +3607,11 @@ static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
mvneta_set_eee(pp, false);
}
-static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface,
struct phy_device *phy)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 val;
@@ -4500,8 +4508,14 @@ static int mvneta_probe(struct platform_device *pdev)
comphy = NULL;
}
- phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
- &mvneta_phylink_ops);
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+
+ pp->phylink_config.dev = &dev->dev;
+ pp->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
+ phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
goto err_free_irq;
@@ -4513,8 +4527,6 @@ static int mvneta_probe(struct platform_device *pdev)
dev->ethtool_ops = &mvneta_eth_tool_ops;
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
pp->phylink = phylink;
pp->comphy = comphy;
pp->phy_interface = phy_mode;
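Note: the mvneta conversion above replaces the net_device argument of the phylink MAC ops with a struct phylink_config embedded in the driver's private state; callbacks then recover their context via to_net_dev(config->dev) or, as mvpp2 does below, via container_of(). A generic sketch of the embedded-struct pattern; the type and function names here are hypothetical.

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* hypothetical stand-in for struct phylink_config */
    struct pl_config { void *dev; int type; };

    struct port_priv {
            int id;
            struct pl_config phylink_config;        /* embedded, not a pointer */
    };

    /* A callback receives only the embedded config; container_of() recovers
     * the owning private structure, as the mvpp2 callbacks below do.
     */
    static struct port_priv *port_from_config(struct pl_config *config)
    {
            return container_of(config, struct port_priv, phylink_config);
    }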
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 6171270a016c..d67c970f02e5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -148,6 +148,8 @@
#define MVPP22_CLS_C2_ATTR2 0x1b6c
#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
#define MVPP22_CLS_C2_ATTR3 0x1b70
+#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90
+#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0)
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
@@ -624,6 +626,7 @@
#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
/* RSS constants */
+#define MVPP22_N_RSS_TABLES 8
#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
@@ -725,6 +728,10 @@ enum mvpp2_prs_l3_cast {
/* Definitions */
struct mvpp2_dbgfs_entries;
+struct mvpp2_rss_table {
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
@@ -788,6 +795,9 @@ struct mvpp2 {
/* Debugfs entries private data */
struct mvpp2_dbgfs_entries *dbgfs_entries;
+
+ /* RSS Indirection tables */
+ struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
};
struct mvpp2_pcpu_stats {
@@ -905,6 +915,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
+ struct phylink_config phylink_config;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -919,12 +930,14 @@ struct mvpp2_port {
u32 tx_time_coal;
- /* RSS indirection table */
- u32 indir[MVPP22_RSS_TABLE_ENTRIES];
-
/* List of steering rules active on that port */
- struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES];
+ struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
int n_rfs_rules;
+
+ /* Each port has its own view of the rss contexts, so that it can number
+ * them from 0
+ */
+ int rss_ctx[MVPP22_N_RSS_TABLES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index a57d17ab91f0..e47c00c5f829 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -923,6 +923,12 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_c2_write(priv, &c2);
}
+ /* Disable the FIFO stages in C2 engine, which are only used in BIST
+ * mode
+ */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
+ MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+
mvpp2_cls_port_init_flows(priv);
}
@@ -963,12 +969,22 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
{
struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ /* The RxQ number is used to select the RSS table. In that case, we set
+ * it to be the ctx number.
+ */
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
@@ -977,22 +993,45 @@ static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ /* Reset the default destination RxQ to the port's first rx queue. */
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
}
-void mvpp22_port_rss_enable(struct mvpp2_port *port)
+static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
+{
+ return port->rss_ctx[port_rss_ctx];
+}
+
+int mvpp22_port_rss_enable(struct mvpp2_port *port)
{
- mvpp2_rss_port_c2_enable(port);
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
+ mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
+
+ return 0;
}
-void mvpp22_port_rss_disable(struct mvpp2_port *port)
+int mvpp22_port_rss_disable(struct mvpp2_port *port)
{
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
mvpp2_rss_port_c2_disable(port);
+
+ return 0;
}
static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
@@ -1029,7 +1068,7 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
struct flow_action_entry *act;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql, pmap;
- int index;
+ int index, ctx;
memset(&c2, 0, sizeof(c2));
@@ -1069,14 +1108,36 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
*/
c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+ /* Update RSS status after matching this entry */
+ if (act->queue.ctx)
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ /* Always lock the RSS_EN decision. We might have high prio
+ * rules steering to an RXQ, and a lower one steering to RSS;
+ * we don't want the low prio RSS rule overwriting this flag.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
/* Mark packet as "forwarded to software", needed for RSS */
c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
- qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ if (act->queue.ctx) {
+ /* Get the global ctx number */
+ ctx = mvpp22_rss_ctx(port, act->queue.ctx);
+ if (ctx < 0)
+ return -EINVAL;
+
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ } else {
+ qh = ((act->queue.index + port->first_rxq) >> 3) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = (act->queue.index + port->first_rxq) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ }
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
@@ -1196,6 +1257,13 @@ static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
return -EOPNOTSUPP;
+ /* When both an RSS context and a queue index are set, the index
+ * is considered as an offset to be added to the indirection table
+ * entries. We don't support this, so reject this rule.
+ */
+ if (act->queue.ctx && act->queue.index)
+ return -EOPNOTSUPP;
+
/* For now, only use the C2 engine which has a HEK size limited to 64
* bits for TCAM matching.
*/
@@ -1212,7 +1280,7 @@ int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
{
struct mvpp2_ethtool_fs *efs;
- if (rxnfc->fs.location >= MVPP2_N_RFS_RULES)
+ if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
return -EINVAL;
efs = port->rfs_rules[rxnfc->fs.location];
@@ -1232,8 +1300,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
struct mvpp2_ethtool_fs *efs, *old_efs;
int ret = 0;
- if (info->fs.location >= 4 ||
- info->fs.location < 0)
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
return -EINVAL;
efs = kzalloc(sizeof(*efs), GFP_KERNEL);
@@ -1242,6 +1309,12 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
input.fs = &info->fs;
+ /* We need to manually set the rss_ctx, since this info isn't present
+ * in info->fs
+ */
+ if (info->fs.flow_type & FLOW_RSS)
+ input.rss_ctx = info->rss_context;
+
ethtool_rule = ethtool_rx_flow_rule_create(&input);
if (IS_ERR(ethtool_rule)) {
ret = PTR_ERR(ethtool_rule);
@@ -1328,19 +1401,160 @@ static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+static void mvpp22_rss_fill_table(struct mvpp2_port *port,
+ struct mvpp2_rss_table *table,
+ u32 rss_ctx)
{
struct mvpp2 *priv = port->priv;
int i;
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
MVPP22_RSS_INDEX_TABLE_ENTRY(i);
mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
- mvpp22_rxfh_indir(port, port->indir[i]));
+ mvpp22_rxfh_indir(port, table->indir[i]));
+ }
+}
+
+static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 ctx;
+
+ /* Find the first free RSS table */
+ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
+ if (!priv->rss_tables[ctx])
+ break;
+ }
+
+ if (ctx == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
+ GFP_KERNEL);
+ if (!priv->rss_tables[ctx])
+ return -ENOMEM;
+
+ *rss_ctx = ctx;
+
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+{
+ u32 rss_ctx;
+ int ret, i;
+
+ ret = mvpp22_rss_context_create(port, &rss_ctx);
+ if (ret)
+ return ret;
+
+ /* Find the first available context number in the port, starting from 1.
+ * Context 0 on each port is reserved for the default context.
+ */
+ for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
+ if (port->rss_ctx[i] < 0)
+ break;
+ }
+
+ if (i == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ port->rss_ctx[i] = rss_ctx;
+ *port_ctx = i;
+
+ return 0;
+}
+
+static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
+ int rss_ctx)
+{
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return NULL;
+
+ return priv->rss_tables[rss_ctx];
+}
+
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ struct ethtool_rxnfc *rxnfc;
+ int i, rss_ctx, ret;
+
+ rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ /* Invalidate any active classification rule that uses this context */
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (!port->rfs_rules[i])
+ continue;
+
+ rxnfc = &port->rfs_rules[i]->rxnfc;
+ if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
+ rxnfc->rss_context != port_ctx)
+ continue;
+
+ ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
+ if (ret) {
+ netdev_warn(port->dev,
+ "couldn't remove classification rule %d associated to this context",
+ rxnfc->fs.location);
+ }
}
+
+ kfree(priv->rss_tables[rss_ctx]);
+
+ priv->rss_tables[rss_ctx] = NULL;
+ port->rss_ctx[port_ctx] = -1;
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
+ const u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(rss_table->indir, indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ mvpp22_rss_fill_table(port, rss_table, rss_ctx);
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
+ u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(indir, rss_table->indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ return 0;
}
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
@@ -1424,32 +1638,32 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-void mvpp22_port_rss_init(struct mvpp2_port *port)
+int mvpp22_port_rss_init(struct mvpp2_port *port)
{
- struct mvpp2 *priv = port->priv;
- int i;
+ struct mvpp2_rss_table *table;
+ u32 context = 0;
+ int i, ret;
- /* Set the table width: replace the whole classifier Rx queue number
- * with the ones configured in RSS table entries.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+ for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
+ port->rss_ctx[i] = -1;
- /* The default RxQ is used as a key to select the RSS table to use.
- * We use one RSS table per port.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX,
- MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
- mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(port->id));
+ ret = mvpp22_rss_context_create(port, &context);
+ if (ret)
+ return ret;
+
+ table = mvpp22_rss_table_get(port->priv, context);
+ if (!table)
+ return -EINVAL;
+
+ port->rss_ctx[0] = context;
/* Configure the first table to evenly distribute the packets across
* real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
- port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+ table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp22_rss_fill_table(port, port->id);
+ mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
/* Configure default flows */
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
@@ -1458,4 +1672,6 @@ void mvpp22_port_rss_init(struct mvpp2_port *port)
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
+
+ return 0;
}
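Note: the per-port rss_ctx[] array introduced above maps port-local context numbers (what ethtool sees) to global RSS table indices, with -1 marking a free slot and slot 0 reserved for the port's default context. A sketch of the allocation walk; 'global' is assumed to come from a global-table allocator such as mvpp22_rss_context_create().

    #define N_RSS_TABLES 8  /* mirrors MVPP22_N_RSS_TABLES */

    struct rss_port {
            /* port-local ctx -> global table index, -1 when free */
            int rss_ctx[N_RSS_TABLES];
    };

    /* Mirror of the walk in mvpp22_port_rss_ctx_create(): slot 0 is the
     * port's default context, so user-created contexts start at 1.
     */
    static int port_ctx_alloc(struct rss_port *port, int global)
    {
            int i;

            for (i = 1; i < N_RSS_TABLES; i++) {
                    if (port->rss_ctx[i] < 0) {
                            port->rss_ctx[i] = global;
                            return i;       /* port-local context number */
                    }
            }
            return -1;      /* no free slot on this port */
    }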
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 56b617375a65..26cc1176e758 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -249,11 +249,18 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
-void mvpp22_port_rss_init(struct mvpp2_port *port);
+int mvpp22_port_rss_init(struct mvpp2_port *port);
-void mvpp22_port_rss_enable(struct mvpp2_port *port);
-void mvpp22_port_rss_disable(struct mvpp2_port *port);
+int mvpp22_port_rss_enable(struct mvpp2_port *port);
+int mvpp22_port_rss_disable(struct mvpp2_port *port);
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
+ const u32 *indir);
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
+ u32 *indir);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d8e5241097a9..4b4d79611339 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -56,9 +56,9 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface, struct phy_device *phy);
/* Queue modes */
@@ -3237,9 +3237,9 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
struct phylink_link_state state = {
.interface = port->phy_interface,
};
- mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
- NULL);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface, NULL);
}
netif_tx_start_all_queues(port->dev);
@@ -3954,7 +3954,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
ret = mvpp2_ethtool_cls_rule_get(port, info);
break;
case ETHTOOL_GRXCLSRLALL:
- for (i = 0; i < MVPP2_N_RFS_RULES; i++) {
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
if (port->rfs_rules[i])
rules[loc++] = i;
}
@@ -4000,24 +4000,25 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
if (indir)
- memcpy(indir, port->indir,
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
- return 0;
+ return ret;
}
static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
@@ -4028,15 +4029,58 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
if (key)
return -EOPNOTSUPP;
- if (indir) {
- memcpy(port->indir, indir,
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
- mvpp22_rss_fill_table(port, port->id);
- }
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
- return 0;
+ return ret;
+}
+
+static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+
+ return ret;
}
+static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc, u32 *rss_context,
+ bool delete)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (delete)
+ return mvpp22_port_rss_ctx_delete(port, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = mvpp22_port_rss_ctx_create(port, rss_context);
+ if (ret)
+ return ret;
+ }
+
+ return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+}
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -4073,7 +4117,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
-
+ .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
+ .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -4416,11 +4461,12 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
eth_hw_addr_random(dev);
}
-static void mvpp2_phylink_validate(struct net_device *dev,
+static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* Invalid combinations */
@@ -4544,10 +4590,11 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_TX;
}
-static int mvpp2_phylink_mac_link_state(struct net_device *dev,
+static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4563,9 +4610,10 @@ static int mvpp2_phylink_mac_link_state(struct net_device *dev,
return 1;
}
-static void mvpp2_mac_an_restart(struct net_device *dev)
+static void mvpp2_mac_an_restart(struct phylink_config *config)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -4750,9 +4798,10 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
}
}
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
bool change_interface = port->phy_interface != state->interface;
@@ -4792,9 +4841,10 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
mvpp2_port_enable(port);
}
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface, struct phy_device *phy)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
@@ -4819,9 +4869,10 @@ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
netif_tx_wake_all_queues(dev);
}
-static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
- phy_interface_t interface)
+static void mvpp2_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
@@ -5078,8 +5129,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* Phylink isn't used w/ ACPI as of now */
if (port_node) {
- phylink = phylink_create(dev, port_fwnode, phy_mode,
- &mvpp2_phylink_ops);
+ port->phylink_config.dev = &dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&port->phylink_config, port_fwnode,
+ phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
goto err_free_port_pcpu;
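The mvpp2 hunks above track phylink's move from passing a net_device into the MAC callbacks to passing a struct phylink_config. Both recovery idioms the driver now uses are shown in one hedged sketch below (the foo_* names are hypothetical; container_of() and to_net_dev() are the real kernel helpers):

	struct foo_port {
		struct net_device *ndev;
		struct phylink_config phylink_config;	/* embedded, like mvpp2_port */
	};

	static void foo_validate(struct phylink_config *config,
				 unsigned long *supported,
				 struct phylink_link_state *state)
	{
		/* 1) recover the embedding struct, as mvpp2_phylink_validate() does */
		struct foo_port *port = container_of(config, struct foo_port,
						     phylink_config);

		/* 2) or go through config->dev, as mvpp2_mac_config() does;
		 * valid because the driver registered with PHYLINK_NETDEV
		 */
		struct net_device *dev = to_net_dev(config->dev);
	}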
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index d41a2414c575..2d8362f9341b 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -3,4 +3,5 @@
# Makefile for the Mediatek SoCs built-in ethernet macs
#
-obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100644
index 000000000000..61f705d945e5
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring path from GMAC/GDM to target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+struct mtk_eth_muxc {
+ int (*set_path)(struct mtk_eth *eth, int path);
+};
+
+static const char * const mtk_eth_mux_name[] = {
+ "mux_gdm1_to_gmac1_esw", "mux_gmac2_gmac0_to_gephy",
+ "mux_u3_gmac2_to_qphy", "mux_gmac1_gmac2_to_sgmii_rgmii",
+ "mux_gmac12_to_gephy_sgmii",
+};
+
+static const char * const mtk_eth_path_name[] = {
+ "gmac1_rgmii", "gmac1_trgmii", "gmac1_sgmii", "gmac2_rgmii",
+ "gmac2_sgmii", "gmac2_gephy", "gdm1_esw",
+};
+
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+{
+ bool updated = true;
+ u32 val, mask, set;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = 0;
+ break;
+ case MTK_ETH_PATH_GDM1_ESW:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = MTK_MUX_TO_ESW;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated) {
+ val = mtk_r32(eth, MTK_MAC_MISC);
+ val = (val & mask) | set;
+ mtk_w32(eth, val, MTK_MAC_MISC);
+ }
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name[path], __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val = ~(u32)GEPHY_MAC_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name[path], __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = CO_QPHY_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name[path], __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val = SYSCFG0_SGMII_GMAC1;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = SYSCFG0_SGMII_GMAC2;
+ break;
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= SYSCFG0_SGMII_MASK;
+
+ if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
+ (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
+ val = 0;
+ else
+ updated = false;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name[path], __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val |= SYSCFG0_SGMII_GMAC1_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val |= SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ default:
+ updated = false;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name[path], __func__, updated);
+
+ return 0;
+}
+
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+ { .set_path = set_mux_gdm1_to_gmac1_esw, },
+ { .set_path = set_mux_gmac2_gmac0_to_gephy, },
+ { .set_path = set_mux_u3_gmac2_to_qphy, },
+ { .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, },
+ { .set_path = set_mux_gmac12_to_gephy_sgmii, }
+};
+
+static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+{
+ int i, err = 0;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_PATH_BIT(path))) {
+ dev_err(eth->dev, "path %s isn't supported on the SoC\n",
+ mtk_eth_path_name[path]);
+ return -EINVAL;
+ }
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+ return 0;
+
+ /* Setup MUX in path fabric */
+ for (i = 0; i < MTK_ETH_MUX_MAX; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_MUX_BIT(i))) {
+ err = mtk_eth_muxc[i].set_path(eth, path);
+ if (err)
+ goto out;
+ } else {
+ dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+ mtk_eth_mux_name[i]);
+ }
+ }
+
+out:
+ return err;
+}
+
+static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ unsigned int val = 0;
+ int sid, err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
+ MTK_ETH_PATH_GMAC2_SGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ /* The path from GMAC to SGMII will be enabled once the SGMIISYS
+ * setup is done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Decide how GMAC and SGMIISYS are mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
+
+ /* Setup SGMIISYS with the determined property */
+ if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
+ err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+ else
+ err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
+
+ if (err)
+ return err;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ return 0;
+}
+
+static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path = 0;
+
+ if (mac_id == 1)
+ path = MTK_ETH_PATH_GMAC2_GEPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
+ MTK_ETH_PATH_GMAC2_RGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
+{
+ int err;
+
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_gmac_sgmii_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
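For orientation, a worked trace of the new entry point, derived from the code above (the MT7622 capability set referenced here is defined in mtk_eth_soc.h further down): bringing up GMAC1 in SGMII mode resolves as follows.

	/* mtk_setup_hw_path(eth, 0, PHY_INTERFACE_MODE_SGMII) on MT7622:
	 *   -> mtk_gmac_sgmii_path_setup(eth, 0)
	 *      -> mtk_eth_mux_setup(eth, MTK_ETH_PATH_GMAC1_SGMII)
	 *         -> set_mux_gdm1_to_gmac1_esw()           (mux present)
	 *         -> set_mux_gmac1_gmac2_to_sgmii_rgmii()  (mux present)
	 *      -> mtk_sgmii_setup_mode_an() or mtk_sgmii_setup_mode_force(),
	 *         depending on the "mediatek,physpeed" DT property
	 */
	err = mtk_setup_hw_path(eth, 0, PHY_INTERFACE_MODE_SGMII);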
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 765cd56ebcd2..362eacd82b92 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -54,8 +54,10 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
- "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
+ "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+ "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "sgmii_ck", "eth2pll",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -165,47 +167,6 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
-static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
-{
- u32 val;
-
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
- SGMII_LINK_TIMER_DEFAULT);
-
- regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
- val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
-
- regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
-
- regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- /* Determine MUX for which GMAC uses the SGMII interface */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- val &= ~SYSCFG0_SGMII_MASK;
- val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
-
- dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
- mac_id);
- }
-
- /* Setup the GMAC1 going through SGMII path when SoC also support
- * ESW on GMAC1
- */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
- !mac_id) {
- mtk_w32(eth, 0, MTK_MAC_MISC);
- dev_info(eth->dev, "setup gmac1 going through sgmii");
- }
-}
-
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -295,6 +256,7 @@ static int mtk_phy_connect(struct net_device *dev)
struct mtk_eth *eth;
struct device_node *np;
u32 val;
+ int err;
eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
@@ -304,6 +266,10 @@ static int mtk_phy_connect(struct net_device *dev)
if (!np)
return -ENODEV;
+ err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
+ if (err)
+ goto err_phy;
+
mac->ge_mode = 0;
switch (of_get_phy_mode(np)) {
case PHY_INTERFACE_MODE_TRGMII:
@@ -312,12 +278,10 @@ static int mtk_phy_connect(struct net_device *dev)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
- break;
case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
- mtk_gmac_sgmii_hw_setup(eth, mac->id);
break;
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
mac->ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -2483,16 +2447,28 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->ethsys);
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- eth->sgmiisys =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,sgmiisys");
- if (IS_ERR(eth->sgmiisys)) {
- dev_err(&pdev->dev, "no sgmiisys regmap found\n");
- return PTR_ERR(eth->sgmiisys);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+ eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,infracfg");
+ if (IS_ERR(eth->infra)) {
+ dev_err(&pdev->dev, "no infracfg regmap found\n");
+ return PTR_ERR(eth->infra);
}
}
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+ GFP_KERNEL);
+ if (!eth->sgmii)
+ return -ENOMEM;
+
+ err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
+ eth->soc->ana_rgc3);
+
+ if (err)
+ return err;
+ }
+
if (eth->soc->required_pctl) {
eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,pctl");
@@ -2631,7 +2607,7 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
+ .caps = MT7623_CAPS | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
@@ -2643,22 +2619,31 @@ static const struct mtk_soc_data mt7621_data = {
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
+ .ana_rgc3 = 0x2028,
+ .caps = MT7622_CAPS | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
+ .caps = MT7623_CAPS | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
+static const struct mtk_soc_data mt7629_data = {
+ .ana_rgc3 = 0x128,
+ .caps = MT7629_CAPS | MTK_HWLRO,
+ .required_clks = MT7629_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index f7501997cea0..a0aa5008d5cc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,6 +15,10 @@
#ifndef MTK_ETH_H
#define MTK_ETH_H
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#define MTK_QDMA_PAGE_SIZE 2048
@@ -369,9 +373,12 @@
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
-#define SYSCFG0_SGMII_MASK (3 << 8)
-#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8))
-#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8))
+#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0 0x2c
@@ -399,6 +406,11 @@
#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
#define SGMII_PHYA_PWD BIT(4)
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2 0x70c
+#define CO_QPHY_SEL BIT(0)
+#define GEPHY_MAC_SEL BIT(1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -463,15 +475,21 @@ enum mtk_tx_flags {
*/
enum mtk_clks_map {
MTK_CLK_ETHIF,
+ MTK_CLK_SGMIITOP,
MTK_CLK_ESW,
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
MTK_CLK_SGMII_RX_250M,
MTK_CLK_SGMII_CDR_REF,
MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII2_TX_250M,
+ MTK_CLK_SGMII2_RX_250M,
+ MTK_CLK_SGMII2_CDR_REF,
+ MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
MTK_CLK_MAX
@@ -490,6 +508,19 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
+#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
enum mtk_dev_state {
MTK_HW_INIT,
@@ -560,21 +591,105 @@ struct mtk_rx_ring {
u32 crx_idx_reg;
};
-#define MTK_TRGMII BIT(0)
-#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII)
-#define MTK_ESW BIT(4)
-#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW)
-#define MTK_SGMII BIT(8)
-#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII)
-#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
-#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
- MTK_GMAC2_SGMII)
-#define MTK_HWLRO BIT(12)
-#define MTK_SHARED_INT BIT(13)
+enum mtk_eth_mux {
+ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+ MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+ MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+ MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+ MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+ MTK_ETH_MUX_MAX,
+};
+
+enum mtk_eth_path {
+ MTK_ETH_PATH_GMAC1_RGMII,
+ MTK_ETH_PATH_GMAC1_TRGMII,
+ MTK_ETH_PATH_GMAC1_SGMII,
+ MTK_ETH_PATH_GMAC2_RGMII,
+ MTK_ETH_PATH_GMAC2_SGMII,
+ MTK_ETH_PATH_GMAC2_GEPHY,
+ MTK_ETH_PATH_GDM1_ESW,
+ MTK_ETH_PATH_MAX,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII BIT(0)
+#define MTK_TRGMII BIT(1)
+#define MTK_SGMII BIT(2)
+#define MTK_ESW BIT(3)
+#define MTK_GEPHY BIT(4)
+#define MTK_MUX BIT(5)
+#define MTK_INFRA BIT(6)
+#define MTK_SHARED_SGMII BIT(7)
+#define MTK_HWLRO BIT(8)
+#define MTK_SHARED_INT BIT(9)
+
+/* Supported path present on SoCs */
+#define MTK_PATH_BIT(x) BIT((x) + 10)
+
+#define MTK_GMAC1_RGMII \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_RGMII) | MTK_RGMII)
+
+#define MTK_GMAC1_TRGMII \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_TRGMII) | MTK_TRGMII)
+
+#define MTK_GMAC1_SGMII \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII) | MTK_SGMII)
+
+#define MTK_GMAC2_RGMII \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_RGMII) | MTK_RGMII)
+
+#define MTK_GMAC2_SGMII \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_SGMII) | MTK_SGMII)
+
+#define MTK_GMAC2_GEPHY \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GMAC2_GEPHY) | MTK_GEPHY)
+
+#define MTK_GDM1_ESW \
+ (MTK_PATH_BIT(MTK_ETH_PATH_GDM1_ESW) | MTK_ESW)
+
+#define MTK_MUX_BIT(x) BIT((x) + 20)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW \
+ (MTK_MUX_BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW) | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
+ (MTK_MUX_BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY) | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY \
+ (MTK_MUX_BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY) | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ (MTK_MUX_BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII) | MTK_MUX | \
+ MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
+ (MTK_MUX_BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII) | MTK_MUX)
+
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+ MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
+
+#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII)
+
/* struct mtk_soc_data - This is the structure holding all differences
* among various platforms
+ * @ana_rgc3: The offset for register ANA_RGC3 related to
+ * sgmiisys syscon
* @caps Flags showing the extra capabilities for the SoC
* @required_clks Flags showing the bitmap for required clocks on
* the target SoC
@@ -582,6 +697,7 @@ struct mtk_rx_ring {
* the extra setup for those pins used by GMAC.
*/
struct mtk_soc_data {
+ u32 ana_rgc3;
u32 caps;
u32 required_clks;
bool required_pctl;
@@ -590,6 +706,26 @@ struct mtk_soc_data {
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
+#define MTK_SGMII_PHYSPEED_AN BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000 BIT(0)
+#define MTK_SGMII_PHYSPEED_2500 BIT(1)
+#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @regmap: The register map pointing at the range used to setup
+ * SGMII modes
+ * @flags: The flags that select which mode the sgmii runs in
+ * @ana_rgc3: The offset of register ANA_RGC3 relative to the regmap
+ */
+
+struct mtk_sgmii {
+ struct regmap *regmap[MTK_MAX_DEVS];
+ u32 flags[MTK_MAX_DEVS];
+ u32 ana_rgc3;
+};
+
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
@@ -605,8 +741,8 @@ struct mtk_soc_data {
* @msg_enable: Ethtool msg level
* @ethsys: The register map pointing at the range used to setup
* MII modes
- * @sgmiisys: The register map pointing at the range used to setup
- * SGMII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
@@ -638,7 +774,8 @@ struct mtk_eth {
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
- struct regmap *sgmiisys;
+ struct regmap *infra;
+ struct mtk_sgmii *sgmii;
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -689,4 +826,10 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
+ u32 ana_rgc3);
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
+int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
+
#endif /* MTK_ETH_H */
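The capability word above packs three namespaces into one u32: hardware-group bits 0-9, per-path bits starting at 10 (MTK_PATH_BIT), and per-mux bits starting at 20 (MTK_MUX_BIT), with MTK_HAS_CAPS requiring every bit of a compound macro to be set. A self-contained userspace check of that layout (the macros are restated from the header, not new definitions):

	#include <assert.h>
	#include <stdint.h>

	#define BIT(n)			(1u << (n))
	#define MTK_SGMII		BIT(2)
	#define MTK_PATH_BIT(x)		BIT((x) + 10)
	#define MTK_ETH_PATH_GMAC1_SGMII 2
	#define MTK_GMAC1_SGMII	(MTK_PATH_BIT(MTK_ETH_PATH_GMAC1_SGMII) | MTK_SGMII)
	#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

	int main(void)
	{
		uint32_t caps = MTK_GMAC1_SGMII;	/* subset of MT7622_CAPS */

		/* a path capability implies its hardware-group bit */
		assert(MTK_HAS_CAPS(caps, MTK_SGMII));
		/* but the group bit alone does not imply the path */
		assert(!MTK_HAS_CAPS(MTK_SGMII, MTK_GMAC1_SGMII));
		return 0;
	}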
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
new file mode 100644
index 000000000000..136f90ce5a65
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+ struct device_node *np;
+ const char *str;
+ int i, err;
+
+ ss->ana_rgc3 = ana_rgc3;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ ss->regmap[i] = syscon_node_to_regmap(np);
+ if (IS_ERR(ss->regmap[i]))
+ return PTR_ERR(ss->regmap[i]);
+
+ err = of_property_read_string(np, "mediatek,physpeed", &str);
+ if (err)
+ return err;
+
+ if (!strcmp(str, "2500"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
+ else if (!strcmp(str, "1000"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
+ else if (!strcmp(str, "auto"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+{
+ unsigned int val;
+
+ if (!ss->regmap[id])
+ return -EINVAL;
+
+ /* Setup the link timer and QPHY power up inside SGMIISYS */
+ regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+ SGMII_LINK_TIMER_DEFAULT);
+
+ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ val |= SGMII_REMOTE_FAULT_DIS;
+ regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+ regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ val |= SGMII_AN_RESTART;
+ regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+ regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val &= ~SGMII_PHYA_PWD;
+ regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+ return 0;
+}
+
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
+{
+ unsigned int val;
+ int mode;
+
+ if (!ss->regmap[id])
+ return -EINVAL;
+
+ regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+ val &= ~GENMASK(3, 2);
+ mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
+ val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
+ regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+
+ /* Disable SGMII AN */
+ regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ val &= ~BIT(12);
+ regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+ /* SGMII force mode setting */
+ val = 0x31120019;
+ regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+ /* Release PHYA power down state */
+ regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val &= ~SGMII_PHYA_PWD;
+ regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+ return 0;
+}
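GENMASK(h, l) sets bits l through h, so the argument order is (high, low); with the operands reversed the kernel macro degenerates to an empty mask, which is why the two call sites above read GENMASK(2, 0) and GENMASK(3, 2). A standalone restatement for 32-bit values:

	#include <assert.h>

	/* 32-bit restatement of the kernel's GENMASK() */
	#define GENMASK(h, l) \
		((~0u - (1u << (l)) + 1) & (~0u >> (31 - (h))))

	int main(void)
	{
		assert(GENMASK(2, 0) == 0x7);	/* MTK_SGMII_PHYSPEED_MASK */
		assert(GENMASK(3, 2) == 0xc);	/* speed bits in ANA_RGC3 */
		return 0;
	}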
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 243368dc23db..d9d363fe5cf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -31,12 +31,15 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
+ lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+ en/tc_tun_geneve.o
#
# Core extra
#
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
+ ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d2ab8cd8ad9f..30f7dffb5b1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -316,7 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
- case MLX5_CMD_OP_QUERY_HOST_PARAMS:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -628,7 +628,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
- MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+ MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 0ccd6d40baf7..d2228e37450f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -83,30 +83,3 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_peer_pf_cleanup(dev);
}
-
-static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
-
- MLX5_SET(query_host_params_in, in, opcode,
- MLX5_CMD_OP_QUERY_HOST_PARAMS);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{
- u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
- int err;
-
- err = mlx5_query_host_params_context(dev, out, sizeof(out));
- if (err)
- return err;
-
- *num_vf = MLX5_GET(query_host_params_out, out,
- host_params_context.host_num_of_vfs);
- mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
-
- return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
index 346372df218f..d3d7a00a02ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
@@ -16,7 +16,6 @@ enum {
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
#else /* CONFIG_MLX5_ESWITCH */
@@ -24,9 +23,6 @@ static inline bool
mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
-static inline int
-mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3a183d690e23..4e417dfe4ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1074,8 +1074,6 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
-int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
- struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index fe5d4d7f15ed..b099968b2b7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -3,9 +3,22 @@
#include <net/vxlan.h>
#include <net/gre.h>
-#include "lib/vxlan.h"
+#include <net/geneve.h>
#include "en/tc_tun.h"
+struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return &vxlan_tunnel;
+ else if (netif_is_geneve(tunnel_dev))
+ return &geneve_tunnel;
+ else if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return &gre_tunnel;
+ else
+ return NULL;
+}
+
static int get_route_and_out_devs(struct mlx5e_priv *priv,
struct net_device *dev,
struct net_device **route_dev,
@@ -141,63 +154,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
-{
- __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
- struct udphdr *udp = (struct udphdr *)(buf);
- struct vxlanhdr *vxh = (struct vxlanhdr *)
- ((char *)udp + sizeof(struct udphdr));
-
- udp->dest = tun_key->tp_dst;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(tun_id);
-
- return 0;
-}
-
-static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
-{
- __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
- int hdr_len;
- struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
-
- /* the HW does not calculate GRE csum or sequences */
- if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
- return -EOPNOTSUPP;
-
- greh->protocol = htons(ETH_P_TEB);
-
- /* GRE key */
- hdr_len = gre_calc_hlen(tun_key->tun_flags);
- greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
- if (tun_key->tun_flags & TUNNEL_KEY) {
- __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
- *ptr = tun_id;
- }
-
- return 0;
-}
-
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
- int err = 0;
- struct ip_tunnel_key *key = &e->tun_info.key;
-
- if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- *ip_proto = IPPROTO_UDP;
- err = mlx5e_gen_vxlan_header(buf, key);
- } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- *ip_proto = IPPROTO_GRE;
- err = mlx5e_gen_gre_header(buf, key);
- } else {
- pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
- , e->tunnel_type);
- err = -EOPNOTSUPP;
+ if (!e->tunnel) {
+ pr_warn("mlx5: Cannot generate tunnel header for this tunnel\n");
+ return -EOPNOTSUPP;
}
- return err;
+ return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e);
}
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
@@ -229,7 +194,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
@@ -253,7 +218,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
ipv4_encap_size =
(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) +
- e->tunnel_hlen;
+ e->tunnel->calc_hlen(e);
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -345,7 +310,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
@@ -369,7 +334,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
ipv6_encap_size =
(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct ipv6hdr) +
- e->tunnel_hlen;
+ e->tunnel->calc_hlen(e);
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -455,27 +420,12 @@ out:
return err;
}
-int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
-{
- if (netif_is_vxlan(tunnel_dev))
- return MLX5E_TC_TUNNEL_TYPE_VXLAN;
- else if (netif_is_gretap(tunnel_dev) ||
- netif_is_ip6gretap(tunnel_dev))
- return MLX5E_TC_TUNNEL_TYPE_GRETAP;
- else
- return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
-}
-
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
{
- int tunnel_type = mlx5e_tc_tun_get_type(netdev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(netdev);
- if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
- return true;
- else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
- MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
+ if (tunnel && tunnel->can_offload(priv))
return true;
else
return false;
@@ -486,71 +436,87 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
- e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(tunnel_dev);
- if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
-
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vxlan udp dport was not registered with the HW");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n",
- dst_port);
- return -EOPNOTSUPP;
- }
- e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
- e->tunnel_hlen = VXLAN_HLEN;
- } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
- e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
- } else {
+ if (!tunnel) {
e->reformat_type = -1;
- e->tunnel_hlen = -1;
return -EOPNOTSUPP;
}
- return 0;
+
+ return tunnel->init_encap_attr(tunnel_dev, priv, e, extack);
}
-static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *headers_c,
- void *headers_v)
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v, u8 *match_level)
+{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
+ int err = 0;
+
+ if (!tunnel) {
+ netdev_warn(priv->netdev,
+ "decapsulation offload is not supported for %s net device\n",
+ mlx5e_netdev_kind(filter_dev));
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ *match_level = tunnel->match_level;
+
+ if (tunnel->parse_udp_ports) {
+ err = tunnel->parse_udp_ports(priv, spec, f,
+ headers_c, headers_v);
+ if (err)
+ goto out;
+ }
+
+ if (tunnel->parse_tunnel) {
+ err = tunnel->parse_tunnel(priv, spec, f,
+ headers_c, headers_v);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- void *misc_c = MLX5_ADDR_OF(fte_match_param,
- spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param,
- spec->match_value,
- misc_parameters);
struct flow_match_ports enc_ports;
- flow_rule_match_enc_ports(rule, &enc_ports);
-
/* Full udp dst port must be given */
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
NL_SET_ERR_MSG_MOD(extack,
- "VXLAN decap filter must include enc_dst_port condition");
+ "UDP tunnel decap filter must include enc_dst_port condition");
netdev_warn(priv->netdev,
- "VXLAN decap filter must include enc_dst_port condition\n");
+ "UDP tunnel decap filter must include enc_dst_port condition\n");
return -EOPNOTSUPP;
}
- /* udp dst port must be knonwn as a VXLAN port */
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ if (memchr_inv(&enc_ports.mask->dst, 0xff,
+ sizeof(enc_ports.mask->dst))) {
NL_SET_ERR_MSG_MOD(extack,
- "Matched UDP port is not registered as a VXLAN port");
+ "UDP tunnel decap filter must match enc_dst_port fully");
netdev_warn(priv->netdev,
- "UDP port %d is not registered as a VXLAN port\n",
- be16_to_cpu(enc_ports.key->dst));
+ "UDP tunnel decap filter must match enc_dst_port fully\n");
return -EOPNOTSUPP;
}
- /* dst UDP port is valid here */
+ /* match on UDP protocol and dst port number */
+
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
@@ -559,92 +525,15 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(enc_ports.key->dst));
+ /* UDP src port on outer header is generated by HW,
+ * so it is probably a bad idea to request matching it.
+ * Nonetheless, it is allowed.
+ */
+
MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(enc_ports.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
ntohs(enc_ports.key->src));
- /* match on VNI */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid enc_keyid;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(enc_keyid.mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(enc_keyid.key->keyid));
- }
- return 0;
-}
-
-static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *outer_headers_c,
- void *outer_headers_v)
-{
- void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
-
- if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "GRE HW offloading is not supported");
- netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
- return -EOPNOTSUPP;
- }
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- ip_protocol, IPPROTO_GRE);
-
- /* gre protocol*/
- MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
-
- /* gre key */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid enc_keyid;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
- MLX5_SET(fte_match_set_misc, misc_c,
- gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v,
- gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
- }
-
return 0;
}
-
-int mlx5e_tc_tun_parse(struct net_device *filter_dev,
- struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level)
-{
- int tunnel_type;
- int err = 0;
-
- tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
- if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- *match_level = MLX5_MATCH_L4;
- err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
- headers_c, headers_v);
- } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- *match_level = MLX5_MATCH_L3;
- err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
- headers_c, headers_v);
- } else {
- netdev_warn(priv->netdev,
- "decapsulation offload is not supported for %s (kind: \"%s\")\n",
- netdev_name(filter_dev),
- mlx5e_netdev_kind(filter_dev));
-
- return -EOPNOTSUPP;
- }
- return err;
-}
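The net effect of this refactor is that tunnel-type dispatch moves out of if/else chains into per-type ops tables. Adding another tunnel type then reduces to one ops struct plus one branch in mlx5e_get_tc_tun(); a hedged sketch only, where every foo_* name is hypothetical:

	static struct mlx5e_tc_tunnel foo_tunnel = {
		.tunnel_type         = MLX5E_TC_TUNNEL_TYPE_FOO,	/* new enum value */
		.match_level         = MLX5_MATCH_L4,
		.can_offload         = mlx5e_tc_tun_can_offload_foo,
		.calc_hlen           = mlx5e_tc_tun_calc_hlen_foo,
		.init_encap_attr     = mlx5e_tc_tun_init_encap_attr_foo,
		.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_foo,
		.parse_udp_ports     = NULL,	/* optional hook, cf. gre_tunnel */
		.parse_tunnel        = mlx5e_tc_tun_parse_foo,
	};

	/* ...and one more branch in mlx5e_get_tc_tun():
	 *	else if (netif_is_foo(tunnel_dev))
	 *		return &foo_tunnel;
	 */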
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index b63f15de899d..3c48f7e62505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -14,9 +14,41 @@
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
- MLX5E_TC_TUNNEL_TYPE_GRETAP
+ MLX5E_TC_TUNNEL_TYPE_GENEVE,
+ MLX5E_TC_TUNNEL_TYPE_GRETAP,
};
+struct mlx5e_tc_tunnel {
+ int tunnel_type;
+ enum mlx5_flow_match_level match_level;
+
+ bool (*can_offload)(struct mlx5e_priv *priv);
+ int (*calc_hlen)(struct mlx5e_encap_entry *e);
+ int (*init_encap_attr)(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack);
+ int (*generate_ip_tun_hdr)(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e);
+ int (*parse_udp_ports)(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v);
+ int (*parse_tunnel)(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v);
+};
+
+extern struct mlx5e_tc_tunnel vxlan_tunnel;
+extern struct mlx5e_tc_tunnel geneve_tunnel;
+extern struct mlx5e_tc_tunnel gre_tunnel;
+
+struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
+
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
@@ -30,7 +62,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
-int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
@@ -41,4 +72,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
void *headers_c,
void *headers_v, u8 *match_level);
+int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v);
+
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
new file mode 100644
index 000000000000..238ae85d07cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/geneve.h>
+#include "lib/geneve.h"
+#include "en/tc_tun.h"
+
+#define MLX5E_GENEVE_VER 0
+
+static bool mlx5e_tc_tun_can_offload_geneve(struct mlx5e_priv *priv)
+{
+ return !!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_GENEVE);
+}
+
+static int mlx5e_tc_tun_calc_hlen_geneve(struct mlx5e_encap_entry *e)
+{
+ return sizeof(struct udphdr) +
+ sizeof(struct genevehdr) +
+ e->tun_info->options_len;
+}
+
+static int mlx5e_tc_tun_check_udp_dport_geneve(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ports enc_ports;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ /* Currently we support only default GENEVE
+ * port, so udp dst port must match.
+ */
+ if (be16_to_cpu(enc_ports.key->dst) != GENEVE_UDP_PORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP dst port is not registered as a GENEVE port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a GENEVE port\n",
+ be16_to_cpu(enc_ports.key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_udp_ports_geneve(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err;
+
+ err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_check_udp_dport_geneve(priv, f);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_geneve(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &geneve_tunnel;
+
+ /* Reformat type for GENEVE encap is similar to VXLAN:
+ * in both cases the HW adds in the same place a
+ * defined encapsulation header that the SW provides.
+ */
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ return 0;
+}
+
+static void mlx5e_tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ vni[0] = (__force __u8)(tun_id >> 16);
+ vni[1] = (__force __u8)(tun_id >> 8);
+ vni[2] = (__force __u8)tun_id;
+#else
+ vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+ vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+ vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
+static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_info *tun_info = e->tun_info;
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct genevehdr *geneveh;
+
+ geneveh = (struct genevehdr *)((char *)udp + sizeof(struct udphdr));
+
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_info->key.tp_dst;
+
+ memset(geneveh, 0, sizeof(*geneveh));
+ geneveh->ver = MLX5E_GENEVE_VER;
+ geneveh->opt_len = tun_info->options_len / 4;
+ geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
+ geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
+ mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
+ geneveh->proto_type = htons(ETH_P_TEB);
+
+ if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (!geneveh->opt_len)
+ return -EOPNOTSUPP;
+ ip_tunnel_info_opts_get(geneveh->options, tun_info);
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_vni(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_enc_keyid enc_keyid;
+ void *misc_c, *misc_v;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_vni)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE VNI is not supported");
+ netdev_warn(priv->netdev, "Matching on GENEVE VNI is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, geneve_vni, be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_vni, be32_to_cpu(enc_keyid.key->keyid));
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ u8 max_tlv_option_data_len = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_option_data_len);
+ u8 max_tlv_options = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_options);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ void *misc_c, *misc_v, *misc_3_c, *misc_3_v;
+ struct geneve_opt *option_key, *option_mask;
+ __be32 opt_data_key = 0, opt_data_mask = 0;
+ struct flow_match_enc_opts enc_opts;
+ int res = 0;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ misc_3_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_3);
+ misc_3_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_3);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+ return 0;
+
+ flow_rule_match_enc_opts(rule, &enc_opts);
+
+ if (memchr_inv(&enc_opts.mask->data, 0, sizeof(enc_opts.mask->data)) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.geneve_tlv_option_0_data)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* make sure that we're talking about GENEVE options */
+
+ if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: option type is not GENEVE");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: option type is not GENEVE\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (enc_opts.mask->len &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_geneve_opt_len)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options len is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options len is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* max_geneve_tlv_option_data_len comes in multiples of 4 bytes, and it
+ * doesn't include the TLV option header. 'geneve_opt_len' is a total
+ * len of all the options, including the headers, also multiples of 4
+ * bytes. Len that comes from the dissector is in bytes.
+ */
+
+ if ((enc_opts.key->len / 4) > ((max_tlv_option_data_len + 1) * max_tlv_options)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: unsupported options len");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: unsupported options len (len=%d)\n",
+ enc_opts.key->len);
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, geneve_opt_len, enc_opts.mask->len / 4);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, enc_opts.key->len / 4);
+
+ /* we support matching on one option only, so just get it */
+ option_key = (struct geneve_opt *)&enc_opts.key->data[0];
+ option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
+
+ if (option_key->length > max_tlv_option_data_len) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: unsupported option len");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: unsupported option len (key=%d, mask=%d)\n",
+ option_key->length, option_mask->length);
+ return -EOPNOTSUPP;
+ }
+
+ /* data can't be all 0 - fail to offload such rule */
+ if (!memchr_inv(option_key->opt_data, 0, option_key->length * 4)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: can't match on 0 data field");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: can't match on 0 data field\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* add new GENEVE TLV options object */
+ res = mlx5_geneve_tlv_option_add(priv->mdev->geneve, option_key);
+ if (res) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: failed creating TLV opt object");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: failed creating TLV opt object (class:type:len = 0x%x:0x%x:%d)\n",
+ be16_to_cpu(option_key->opt_class),
+ option_key->type, option_key->length);
+ return res;
+ }
+
+ /* In general, after creating the object, we need to query it
+ * in order to check which option data to set in misc3.
+ * But we support only geneve_tlv_option_0_data, so no
+ * point querying at this stage.
+ */
+
+ memcpy(&opt_data_key, option_key->opt_data, option_key->length * 4);
+ memcpy(&opt_data_mask, option_mask->opt_data, option_mask->length * 4);
+ MLX5_SET(fte_match_set_misc3, misc_3_v,
+ geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
+ MLX5_SET(fte_match_set_misc3, misc_3_c,
+ geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ struct netlink_ext_ack *extack = f->common.extack;
+
+ /* match on OAM - packets with OAM bit on should NOT be offloaded */
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_oam)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE OAM is not supported");
+ netdev_warn(priv->netdev, "Matching on GENEVE OAM is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_oam);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, 0);
+
+ /* Match on GENEVE protocol. We support only Transparent Eth Bridge. */
+
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_geneve_protocol_type)) {
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_protocol_type);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err;
+
+ err = mlx5e_tc_tun_parse_geneve_params(priv, spec, f);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_tun_parse_geneve_vni(priv, spec, f);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
+}
+
+struct mlx5e_tc_tunnel geneve_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = mlx5e_tc_tun_can_offload_geneve,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_geneve,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_geneve,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve,
+ .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve,
+ .parse_tunnel = mlx5e_tc_tun_parse_geneve,
+};
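The byte extraction in mlx5e_tunnel_id_to_vni() above relies on the in-memory layout of a network-order 64-bit tunnel id. A self-contained check of the little-endian branch (it mirrors the #else arm, with userspace types substituted for the kernel ones):

	#include <assert.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static void id_to_vni(uint64_t be_id, uint8_t *vni)
	{
		vni[0] = (uint8_t)(be_id >> 40);
		vni[1] = (uint8_t)(be_id >> 48);
		vni[2] = (uint8_t)(be_id >> 56);
	}

	int main(void)
	{
		/* be64 encoding of VNI 0xabcdef, as built on a little-endian host */
		uint64_t be_id = (uint64_t)htonl(0xabcdef) << 32;
		uint8_t vni[3];

		id_to_vni(be_id, vni);
		assert(vni[0] == 0xab && vni[1] == 0xcd && vni[2] == 0xef);
		return 0;
	}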
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
new file mode 100644
index 000000000000..06908441d932
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/gre.h>
+#include "en/tc_tun.h"
+
+static bool mlx5e_tc_tun_can_offload_gretap(struct mlx5e_priv *priv)
+{
+ return !!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap);
+}
+
+static int mlx5e_tc_tun_calc_hlen_gretap(struct mlx5e_encap_entry *e)
+{
+ return gre_calc_hlen(e->tun_info->key.tun_flags);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_gretap(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &gre_tunnel;
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ int hdr_len;
+
+ *ip_proto = IPPROTO_GRE;
+
+	/* the HW does not calculate GRE checksums or sequence numbers */
+ if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ return -EOPNOTSUPP;
+
+ greh->protocol = htons(ETH_P_TEB);
+
+ /* GRE key */
+ hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
+ greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
+ if (tun_key->tun_flags & TUNNEL_KEY) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+ *ptr = tun_id;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+
+ /* gre protocol */
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
+
+ /* gre key */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+ MLX5_SET(fte_match_set_misc, misc_c,
+ gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v,
+ gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel gre_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_GRETAP,
+ .match_level = MLX5_MATCH_L3,
+ .can_offload = mlx5e_tc_tun_can_offload_gretap,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_gretap,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_gretap,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
+ .parse_udp_ports = NULL,
+ .parse_tunnel = mlx5e_tc_tun_parse_gretap,
+};
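
For reference, gre_calc_hlen() above sizes the header from the tunnel flags:
4 bytes of base header plus 4 more for each of checksum, key and sequence.
A standalone sketch of the same arithmetic, with illustrative flag values
rather than the kernel's TUNNEL_* bits:

#include <stdio.h>

#define TUN_CSUM 0x01	/* illustrative flag values, not the kernel's */
#define TUN_KEY  0x04
#define TUN_SEQ  0x08

/* mirrors the arithmetic of gre_calc_hlen() */
static int calc_gre_hlen(unsigned int flags)
{
	int addend = 4;			/* struct gre_base_hdr */

	if (flags & TUN_CSUM)
		addend += 4;		/* checksum + reserved */
	if (flags & TUN_KEY)
		addend += 4;		/* key */
	if (flags & TUN_SEQ)
		addend += 4;		/* sequence number */
	return addend;
}

int main(void)
{
	/* with only TUN_KEY set, the key sits in the last 4 bytes of an
	 * 8-byte header - exactly where the driver code above writes it */
	printf("hlen=%d\n", calc_gre_hlen(TUN_KEY));	/* prints hlen=8 */
	return 0;
}
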
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
new file mode 100644
index 000000000000..2857b38527d6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/vxlan.h>
+#include "lib/vxlan.h"
+#include "en/tc_tun.h"
+
+static bool mlx5e_tc_tun_can_offload_vxlan(struct mlx5e_priv *priv)
+{
+ return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
+}
+
+static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
+{
+ return VXLAN_HLEN;
+}
+
+static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ports enc_ports;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ /* check the UDP destination port validity */
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
+ be16_to_cpu(enc_ports.key->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP dst port is not registered as a VXLAN port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a VXLAN port\n",
+ be16_to_cpu(enc_ports.key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err = 0;
+
+ err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_check_udp_dport_vxlan(priv, f);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_vxlan(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
+
+ e->tunnel = &vxlan_tunnel;
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vxlan udp dport was not registered with the HW");
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n",
+ dst_port);
+ return -EOPNOTSUPP;
+ }
+
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct vxlanhdr *vxh;
+
+ vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_key->tp_dst;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(tun_id);
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_enc_keyid enc_keyid;
+ void *misc_c, *misc_v;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ /* match on VNI is required */
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_vxlan_vni)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on VXLAN VNI is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on VXLAN VNI is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(enc_keyid.key->keyid));
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel vxlan_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = mlx5e_tc_tun_can_offload_vxlan,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_vxlan,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_vxlan,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
+ .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
+ .parse_tunnel = mlx5e_tc_tun_parse_vxlan,
+};
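
For reference, mlx5e_gen_ip_tunnel_header_vxlan() above builds the 8-byte
VXLAN header: a flags word with the I-bit (VXLAN_HF_VNI) and a second word
carrying the 24-bit VNI in its upper three bytes. A standalone sketch of that
encoding (a host-order VNI is assumed here; the kernel's vxlan_vni_field()
takes a __be32 and handles endianness differently):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t vni = 42;
	uint32_t flags = htonl(1u << 27);	/* VXLAN_HF_VNI (I-bit) */
	uint32_t vni_field = htonl(vni << 8);	/* VNI in the top 24 bits */

	printf("flags=0x%08x vni_field=0x%08x\n",
	       ntohl(flags), ntohl(vni_field));
	return 0;
}
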
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index dd764e0471f2..ea59097dd4f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1867,40 +1867,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
- struct ethtool_flash *flash)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct net_device *dev = priv->netdev;
- const struct firmware *fw;
- int err;
-
- if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, flash->data, &dev->dev);
- if (err)
- return err;
-
- dev_hold(dev);
- rtnl_unlock();
-
- err = mlx5_firmware_flash(mdev, fw);
- release_firmware(fw);
-
- rtnl_lock();
- dev_put(dev);
- return err;
-}
-
-static int mlx5e_flash_device(struct net_device *dev,
- struct ethtool_flash *flash)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- return mlx5e_ethtool_flash_device(priv, flash);
-}
-
#ifndef CONFIG_MLX5_EN_RXNFC
/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
* otherwise this function will be defined from en_fs_ethtool.c
@@ -1939,7 +1905,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
#ifdef CONFIG_MLX5_EN_RXNFC
.set_rxnfc = mlx5e_set_rxnfc,
#endif
- .flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 9aea9c5b2ce8..dde0021bd5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -128,7 +128,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
}
}
-static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -166,17 +166,6 @@ static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
-
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_uplink_rep_update_hw_counters(priv);
- else
- mlx5e_vf_rep_update_hw_counters(priv);
-}
-
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -203,7 +192,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
mlx5e_rep_update_sw_counters(priv);
- mlx5e_rep_update_hw_counters(priv);
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
@@ -363,7 +352,7 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}
-static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -1101,7 +1090,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
-static int mlx5e_vf_rep_open(struct net_device *dev)
+static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1124,7 +1113,7 @@ unlock:
return err;
}
-static int mlx5e_vf_rep_close(struct net_device *dev)
+static int mlx5e_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1276,7 +1265,7 @@ static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev
}
static void
-mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1285,7 +1274,7 @@ mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
-static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
+static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
@@ -1318,16 +1307,16 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
- .ndo_open = mlx5e_vf_rep_open,
- .ndo_stop = mlx5e_vf_rep_close,
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+ .ndo_open = mlx5e_rep_open,
+ .ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
+ .ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
- .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+ .ndo_change_mtu = mlx5e_rep_change_mtu,
.ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};
@@ -1355,7 +1344,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
bool mlx5e_eswitch_rep(struct net_device *netdev)
{
- if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
+ if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
return true;
@@ -1418,9 +1407,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
} else {
- netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
eth_hw_addr_random(netdev);
- netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
}
netdev->watchdog_timeo = 15 * HZ;
@@ -1640,7 +1629,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
}
}
-static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
+static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
mlx5e_set_netdev_mtu_boundaries(priv);
}
@@ -1712,15 +1701,15 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5_lag_remove(mdev);
}
-static const struct mlx5e_profile mlx5e_vf_rep_profile = {
+static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
- .enable = mlx5e_vf_rep_enable,
- .update_stats = mlx5e_vf_rep_update_hw_counters,
+ .enable = mlx5e_rep_enable,
+ .update_stats = mlx5e_rep_update_hw_counters,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
@@ -1759,7 +1748,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rpriv->rep = rep;
nch = mlx5e_get_max_num_channels(dev);
- profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+ profile = (rep->vport == MLX5_VPORT_UPLINK) ?
+ &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
@@ -1769,7 +1759,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
rpriv->netdev = netdev;
- rep->rep_if[REP_ETH].priv = rpriv;
+ rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1843,16 +1833,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+ .load = mlx5e_vport_rep_load,
+ .unload = mlx5e_vport_rep_unload,
+ .get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
+
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep_if rep_if = {};
-
- rep_if.load = mlx5e_vport_rep_load;
- rep_if.unload = mlx5e_vport_rep_unload;
- rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 83b573b1abac..d4585f3b8cb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -91,7 +91,7 @@ struct mlx5e_rep_priv {
static inline
struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
{
- return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+ return rep->rep_data[REP_ETH].priv;
}
struct mlx5e_neigh {
@@ -150,13 +150,12 @@ struct mlx5e_encap_entry {
struct hlist_node encap_hlist;
struct list_head flows;
u32 encap_id;
- struct ip_tunnel_info tun_info;
+ const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
struct net_device *route_dev;
- int tunnel_type;
- int tunnel_hlen;
+ struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
char *encap_header;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31cd02f11499..151e55c8c1ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -53,6 +53,7 @@
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"
+#include "lib/geneve.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -126,7 +127,7 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
- struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
@@ -799,7 +800,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->match_level != MLX5_MATCH_NONE)
- parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
@@ -1063,6 +1064,19 @@ err_max_prio_chain:
return err;
}
+static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+ void *headers_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ misc_parameters_3);
+ u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
+ headers_v,
+ geneve_tlv_option_0_data);
+
+ return !!geneve_tlv_opt_0_data;
+}
+
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -1084,6 +1098,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
+ if (mlx5_flow_has_geneve_opt(flow))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
+
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
@@ -1339,7 +1356,6 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
- struct flow_match_control enc_control;
int err;
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
@@ -1350,9 +1366,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return err;
}
- flow_rule_match_enc_control(rule, &enc_control);
-
- if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
@@ -1372,7 +1386,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
- } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
flow_rule_match_enc_ipv6_addrs(rule, &match);
@@ -1497,29 +1511,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
- if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_enc_control(rule, &match);
- switch (match.key->addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
- return -EOPNOTSUPP;
- break;
- default:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
- }
	/* In decap flow, header pointers should point to the inner
	 * headers; the outer headers were already set by parse_tunnel_attr
@@ -2581,21 +2587,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
struct encap_key {
- struct ip_tunnel_key *ip_tun_key;
- int tunnel_type;
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
};
static inline int cmp_encap_info(struct encap_key *a,
struct encap_key *b)
{
return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tunnel_type != b->tunnel_type;
+ a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}
static inline int hash_encap_info(struct encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
- key->tunnel_type);
+ key->tc_tunnel->tunnel_type);
}
@@ -2625,7 +2631,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_info *tun_info;
struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
@@ -2634,17 +2640,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
int err = 0;
parse_attr = attr->parse_attr;
- tun_info = &parse_attr->tun_info[out_index];
+ tun_info = parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
- key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
+ key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info.key;
- e_key.tunnel_type = e->tunnel_type;
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
@@ -2659,7 +2665,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
if (!e)
return -ENOMEM;
- e->tun_info = *tun_info;
+ e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err;
@@ -2898,7 +2904,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else if (encap) {
parse_attr->mirred_ifindex[attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = *info;
+ parse_attr->tun_info[attr->out_count] = info;
encap = false;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 195a7d903cec..6fd6d5356246 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -301,6 +301,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ bool send_doorbell;
wi->num_bytes = num_bytes;
wi->num_dma = num_dma;
@@ -310,8 +311,6 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- netdev_tx_sent_queue(sq->txq, num_bytes);
-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -321,7 +320,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->stopped++;
}
- if (!xmit_more || netif_xmit_stopped(sq->txq))
+ send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
+ xmit_more);
+ if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
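
The en_tx.c hunk above folds the BQL byte accounting and the doorbell decision
into one call: __netdev_tx_sent_queue() accounts the bytes and returns true
when the doorbell should be rung, letting xmit_more batch a whole burst behind
a single doorbell. A standalone sketch of that contract (the real helper also
forces a doorbell when the queue was stopped, which is elided here):

#include <stdbool.h>
#include <stdio.h>

/* bytes are always accounted, but the (expensive) doorbell is rung
 * only for the last packet of a burst, i.e. when xmit_more is false */
static bool tx_sent_queue(unsigned long *queued_bytes,
			  unsigned int bytes, bool xmit_more)
{
	*queued_bytes += bytes;		/* BQL-style accounting */
	return !xmit_more;		/* ring doorbell at end of burst */
}

int main(void)
{
	unsigned long queued = 0;
	bool more[] = { true, true, false };	/* a 3-packet burst */

	for (int i = 0; i < 3; i++)
		if (tx_sent_queue(&queued, 1500, more[i]))
			printf("doorbell after %lu bytes\n", queued);
	return 0;
}
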
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 23883d1fa22f..5e9319d3d90c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -533,8 +533,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
- if (mlx5_core_is_ecpf_esw_manager(dev))
- async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
+ if (mlx5_eswitch_is_funcs_handler(dev))
+ async_event_mask |=
+ (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
return async_event_mask;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6a921e24cd5e..5414e8f82d5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1686,13 +1686,41 @@ static int eswitch_vport_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int query_esw_functions(struct mlx5_core_dev *dev,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {0};
+
+ MLX5_SET(query_esw_functions_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {0};
+ int err;
+
+ err = query_esw_functions(dev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+ esw_debug(dev, "host_num_of_vfs=%d\n", *num_vfs);
+
+ return 0;
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
- int vf_nvports = 0, total_nvports = 0;
struct mlx5_vport *vport;
+ int total_nvports = 0;
+ u16 vf_nvports = 0;
int err;
int i, enabled_events;
@@ -1712,7 +1740,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (mode == SRIOV_OFFLOADS) {
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
+ err = mlx5_esw_query_functions(esw->dev, &vf_nvports);
if (err)
return err;
total_nvports = esw->total_vports;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index d043d6f9797d..849a628f6d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -173,6 +173,9 @@ struct mlx5_esw_offload {
struct mutex peer_mutex;
DECLARE_HASHTABLE(encap_tbl, 8);
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(termtbl_tbl, 8);
+ struct mutex termtbl_mutex; /* protects termtbl hash */
+ const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
u64 num_flows;
u8 encap;
@@ -190,7 +193,7 @@ struct mlx5_host_work {
struct mlx5_eswitch *esw;
};
-struct mlx5_host_info {
+struct mlx5_esw_functions {
struct mlx5_nb nb;
u16 num_vfs;
};
@@ -219,7 +222,7 @@ struct mlx5_eswitch {
int mode;
int nvports;
u16 manager_vport;
- struct mlx5_host_info host_info;
+ struct mlx5_esw_functions esw_funcs;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
@@ -268,6 +271,25 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
+struct mlx5_termtbl_handle;
+
+bool
+mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec);
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest);
+
+void
+mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+ struct mlx5_termtbl_handle *tt);
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
@@ -338,6 +360,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *rep;
struct mlx5_core_dev *mdev;
u32 encap_id;
+ struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
@@ -386,6 +409,8 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -404,6 +429,18 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+{
+	/* Ideally the device should have the 'functions changed' capability
+	 * set whenever such events are to be processed, e.g. on the eswitch
+	 * manager device, regardless of it being an ECPF or a PF. However,
+	 * some ECPF based devices might not have this capability set, hence
+	 * the OR with the ECPF check to cover such devices.
+	 */
+ return MLX5_CAP_ESW(dev, esw_functions_changed) ||
+ mlx5_core_is_ecpf_esw_manager(dev);
+}
+
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink is always located at the last element of the array. */
@@ -498,6 +535,7 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 47b446d30f71..060de01f09b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -41,7 +41,6 @@
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
-#include "ecpf.h"
#include "lib/eq.h"
/* There are two match-all miss flows, one for unicast dst mac and
@@ -174,7 +173,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (attr->tunnel_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -193,7 +192,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
+ if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
+ rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
+ &flow_act, dest, i);
+ else
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
@@ -267,10 +270,10 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
source_eswitch_owner_vhca_id);
if (attr->match_level == MLX5_MATCH_NONE)
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
else
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
@@ -295,8 +298,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
bool fwd_rule)
{
bool split = (attr->split_count > 0);
+ int i;
mlx5_del_flow_rules(rule);
+
+ /* unref the term table */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ }
+
esw->offloads.num_flows--;
if (fwd_rule) {
@@ -333,7 +344,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -1278,7 +1289,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_if[rep_type].state,
+ atomic_set(&rep->rep_data[rep_type].state,
REP_UNREGISTERED);
}
@@ -1288,9 +1299,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
- if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED)
- rep->rep_if[rep_type].unload(rep);
+ esw->offloads.rep_ops[rep_type]->unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1351,11 +1362,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{
int err = 0;
- if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = rep->rep_if[rep_type].load(esw->dev, rep);
+ err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
- atomic_set(&rep->rep_if[rep_type].state,
+ atomic_set(&rep->rep_data[rep_type].state,
REP_REGISTERED);
}
@@ -1784,57 +1795,79 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_prio_tag_acls_cleanup(esw);
}
-static void esw_host_params_event_handler(struct work_struct *work)
+static void esw_functions_changed_event_handler(struct work_struct *work)
{
struct mlx5_host_work *host_work;
struct mlx5_eswitch *esw;
- int err, num_vf = 0;
+ u16 num_vfs = 0;
+ int err;
host_work = container_of(work, struct mlx5_host_work, work);
esw = host_work->esw;
- err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
- if (err || num_vf == esw->host_info.num_vfs)
+ err = mlx5_esw_query_functions(esw->dev, &num_vfs);
+ if (err || num_vfs == esw->esw_funcs.num_vfs)
goto out;
/* Number of VFs can only change from "0 to x" or "x to 0". */
- if (esw->host_info.num_vfs > 0) {
- esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
+ if (esw->esw_funcs.num_vfs > 0) {
+ esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
} else {
- err = esw_offloads_load_vf_reps(esw, num_vf);
+ err = esw_offloads_load_vf_reps(esw, num_vfs);
if (err)
goto out;
}
- esw->host_info.num_vfs = num_vf;
+ esw->esw_funcs.num_vfs = num_vfs;
out:
kfree(host_work);
}
-static int esw_host_params_event(struct notifier_block *nb,
- unsigned long type, void *data)
+static int esw_functions_changed_event(struct notifier_block *nb,
+ unsigned long type, void *data)
{
+ struct mlx5_esw_functions *esw_funcs;
struct mlx5_host_work *host_work;
- struct mlx5_host_info *host_info;
struct mlx5_eswitch *esw;
host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
if (!host_work)
return NOTIFY_DONE;
- host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
- esw = container_of(host_info, struct mlx5_eswitch, host_info);
+ esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
+ esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
host_work->esw = esw;
- INIT_WORK(&host_work->work, esw_host_params_event_handler);
+ INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
return NOTIFY_OK;
}
+static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
+ u16 vf_nvports)
+{
+ if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+ return;
+
+ MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
+ ESW_FUNCTIONS_CHANGED);
+ mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+ esw->esw_funcs.num_vfs = vf_nvports;
+}
+
+static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+ return;
+
+ mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+ flush_workqueue(esw->work_queue);
+}
+
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports)
{
@@ -1849,13 +1882,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
goto err_reps;
esw_offloads_devcom_init(esw);
+ mutex_init(&esw->offloads.termtbl_mutex);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
- HOST_PARAMS_CHANGE);
- mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
- esw->host_info.num_vfs = vf_nvports;
- }
+ esw_functions_changed_event_init(esw, vf_nvports);
mlx5_rdma_enable_roce(esw->dev);
@@ -1889,13 +1918,12 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
u16 num_vfs;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
- flush_workqueue(esw->work_queue);
- num_vfs = esw->host_info.num_vfs;
- } else {
+ esw_functions_changed_event_cleanup(esw);
+
+ if (mlx5_eswitch_is_funcs_handler(esw->dev))
+ num_vfs = esw->esw_funcs.num_vfs;
+ else
num_vfs = esw->dev->priv.sriov.num_vfs;
- }
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
@@ -2203,21 +2231,17 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep_if *__rep_if,
+ const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
{
- struct mlx5_eswitch_rep_if *rep_if;
+ struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
int i;
+ esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
- rep_if = &rep->rep_if[rep_type];
- rep_if->load = __rep_if->load;
- rep_if->unload = __rep_if->unload;
- rep_if->get_proto_dev = __rep_if->get_proto_dev;
- rep_if->priv = __rep_if->priv;
-
- atomic_set(&rep_if->state, REP_REGISTERED);
+ rep_data = &rep->rep_data[rep_type];
+ atomic_set(&rep_data->state, REP_REGISTERED);
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2232,7 +2256,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
__unload_reps_all_vport(esw, max_vf, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
- atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+ atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2241,7 +2265,7 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
struct mlx5_eswitch_rep *rep;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- return rep->rep_if[rep_type].priv;
+ return rep->rep_data[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -2252,9 +2276,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
rep = mlx5_eswitch_get_rep(esw, vport);
- if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
- rep->rep_if[rep_type].get_proto_dev)
- return rep->rep_if[rep_type].get_proto_dev(rep);
+ if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+ esw->offloads.rep_ops[rep_type]->get_proto_dev)
+ return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
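
The eswitch_offloads.c hunks above replace per-rep copies of the
load/unload/get_proto_dev callbacks with one shared const ops table per rep
type, keeping only mutable state per rep. A standalone sketch of the resulting
shape (types and names here are illustrative):

#include <stdio.h>

/* shared, immutable callbacks: one table per rep type */
struct rep_ops {
	void (*load)(int vport);
};

/* per-rep mutable state is all that remains per instance */
struct rep_data {
	int state;
};

static void eth_load(int vport)
{
	printf("load vport %d\n", vport);
}

static const struct rep_ops eth_ops = { .load = eth_load };

int main(void)
{
	const struct rep_ops *ops = &eth_ops;	/* registered once */
	struct rep_data reps[2] = { { 0 }, { 0 } };

	for (int i = 0; i < 2; i++) {
		ops->load(i);		/* dispatch via the shared table */
		reps[i].state = 1;	/* only state stays per rep */
	}
	return 0;
}
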
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
new file mode 100644
index 000000000000..cb7d8ebe2c95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include <linux/mlx5/fs.h>
+#include "eswitch.h"
+
+struct mlx5_termtbl_handle {
+ struct hlist_node termtbl_hlist;
+
+ struct mlx5_flow_table *termtbl;
+ struct mlx5_flow_act flow_act;
+ struct mlx5_flow_destination dest;
+
+ struct mlx5_flow_handle *rule;
+ int ref_count;
+};
+
+static u32
+mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest)
+{
+ u32 hash;
+
+ hash = jhash_1word(flow_act->action, 0);
+ hash = jhash((const void *)&flow_act->vlan,
+ sizeof(flow_act->vlan), hash);
+ hash = jhash((const void *)&dest->vport.num,
+ sizeof(dest->vport.num), hash);
+ hash = jhash((const void *)&dest->vport.vhca_id,
+		     sizeof(dest->vport.vhca_id), hash);
+ return hash;
+}
+
+static int
+mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
+ struct mlx5_flow_destination *dest1,
+ struct mlx5_flow_act *flow_act2,
+ struct mlx5_flow_destination *dest2)
+{
+ return flow_act1->action != flow_act2->action ||
+ dest1->vport.num != dest2->vport.num ||
+ dest1->vport.vhca_id != dest2->vport.vhca_id ||
+ memcmp(&flow_act1->vlan, &flow_act2->vlan,
+ sizeof(flow_act1->vlan));
+}
+
+static int
+mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
+ struct mlx5_termtbl_handle *tt,
+ struct mlx5_flow_act *flow_act)
+{
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_spec spec = {};
+ int prio, flags;
+ int err;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* Since this is the terminating action, the termination table uses
+	 * the same prio as the slow path
+	 */
+ prio = FDB_SLOW_PATH;
+ flags = MLX5_FLOW_TABLE_TERMINATION;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
+ 0, flags);
+ if (IS_ERR(tt->termtbl)) {
+ esw_warn(dev, "Failed to create termination table\n");
+ return -EOPNOTSUPP;
+ }
+
+ tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
+ &tt->dest, 1);
+
+ if (IS_ERR(tt->rule)) {
+ esw_warn(dev, "Failed to create termination table rule\n");
+ goto add_flow_err;
+ }
+ return 0;
+
+add_flow_err:
+ err = mlx5_destroy_flow_table(tt->termtbl);
+ if (err)
+ esw_warn(dev, "Failed to destroy termination table\n");
+
+ return -EOPNOTSUPP;
+}
+
+static struct mlx5_termtbl_handle *
+mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_termtbl_handle *tt;
+ bool found = false;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&esw->offloads.termtbl_mutex);
+
+ hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
+ hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
+ termtbl_hlist, hash_key) {
+ if (!mlx5_eswitch_termtbl_cmp(&tt->flow_act, &tt->dest,
+ flow_act, dest)) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ goto tt_add_ref;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ err = -ENOMEM;
+ goto tt_create_err;
+ }
+
+ tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ tt->dest.vport.num = dest->vport.num;
+ tt->dest.vport.vhca_id = dest->vport.vhca_id;
+ memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
+
+ err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
+ if (err) {
+ esw_warn(esw->dev, "Failed to create termination table\n");
+ goto tt_create_err;
+ }
+ hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
+tt_add_ref:
+ tt->ref_count++;
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+ return tt;
+tt_create_err:
+ kfree(tt);
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+ return ERR_PTR(err);
+}
+
+void
+mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+ struct mlx5_termtbl_handle *tt)
+{
+ mutex_lock(&esw->offloads.termtbl_mutex);
+ if (--tt->ref_count == 0)
+ hash_del(&tt->termtbl_hlist);
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+
+ if (!tt->ref_count) {
+ mlx5_del_flow_rules(tt->rule);
+ mlx5_destroy_flow_table(tt->termtbl);
+ kfree(tt);
+ }
+}
+
+static void
+mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
+ struct mlx5_flow_act *dst)
+{
+ if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
+ return;
+
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
+ memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
+
+ if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
+ return;
+
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+}
+
+bool
+mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec)
+{
+ u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+ misc_parameters.source_port);
+ u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.source_port);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+ return false;
+
+ /* push vlan on RX */
+ return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
+ ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+}
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest)
+{
+ struct mlx5_flow_act term_tbl_act = {};
+ struct mlx5_flow_handle *rule = NULL;
+ bool term_table_created = false;
+ int num_vport_dests = 0;
+ int i, curr_dest;
+
+ mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ for (i = 0; i < num_dest; i++) {
+ struct mlx5_termtbl_handle *tt;
+
+ /* only vport destinations can be terminated */
+ if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
+ /* get the terminating table for the action list */
+ tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
+ &dest[i]);
+ if (IS_ERR(tt)) {
+ esw_warn(esw->dev, "Failed to create termination table\n");
+ goto revert_changes;
+ }
+ attr->dests[num_vport_dests].termtbl = tt;
+ num_vport_dests++;
+
+ /* link the destination with the termination table */
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = tt->termtbl;
+ term_table_created = true;
+ }
+
+ /* at least one destination should reference a termination table */
+ if (!term_table_created)
+ goto revert_changes;
+
+ /* create the FTE */
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule))
+ goto revert_changes;
+
+ goto out;
+
+revert_changes:
+	/* revert the changes that were made to the original flow_act
+	 * and fall back to the original rule actions
+	 */
+ mlx5_eswitch_termtbl_actions_move(&term_tbl_act, flow_act);
+
+ for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
+ struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+
+ /* search for the destination associated with the
+ * current term table
+ */
+ for (i = 0; i < num_dest; i++) {
+ if (dest[i].ft != tt->termtbl)
+ continue;
+
+ memset(&dest[i], 0, sizeof(dest[i]));
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = tt->dest.vport.num;
+ dest[i].vport.vhca_id = tt->dest.vport.vhca_id;
+ mlx5_eswitch_termtbl_put(esw, tt);
+ break;
+ }
+ }
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+out:
+ return rule;
+}
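
Note how mlx5_eswitch_termtbl_actions_move() above is its own inverse: the
revert path simply calls it with the arguments swapped to restore the original
flow_act. A standalone sketch of that move-and-undo pattern (the flag value is
illustrative, not the kernel's):

#include <stdio.h>

#define ACT_VLAN_PUSH 0x1	/* illustrative action flag */

struct act { unsigned int action; int vlan; };

/* move the VLAN-push action (and its parameter) from src to dst;
 * calling it with swapped arguments undoes the move, which is how
 * the revert path restores the original flow_act */
static void actions_move(struct act *src, struct act *dst)
{
	if (!(src->action & ACT_VLAN_PUSH))
		return;
	src->action &= ~ACT_VLAN_PUSH;
	dst->action |= ACT_VLAN_PUSH;
	dst->vlan = src->vlan;
	src->vlan = 0;
}

int main(void)
{
	struct act flow = { ACT_VLAN_PUSH, 100 };
	struct act term = { 0, 0 };

	actions_move(&flow, &term);	/* push now done by term table */
	printf("flow=%x term=%x vlan=%d\n", flow.action, term.action,
	       term.vlan);
	actions_move(&term, &flow);	/* revert */
	printf("flow=%x vlan=%d\n", flow.action, flow.vlan);
	return 0;
}
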
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index a81e8d2168d8..8bcf3426b9c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -108,8 +108,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
- case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
- return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
+ case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
+ return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 013b1ca4a791..bb24c3797218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -147,6 +147,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
struct mlx5_core_dev *dev = ns->dev;
@@ -167,6 +168,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
en_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+ term);
switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 1ab6f7e3bec6..e8fedb307b2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -552,7 +552,8 @@ static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
};
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
.mlxfw_dev = {
@@ -571,5 +572,6 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
}
- return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
+ return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
+ firmware, extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..ebd81f6b556e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,14 +122,6 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
return mlx5e_ethtool_get_ts_info(priv, info);
}
-static int mlx5i_flash_device(struct net_device *netdev,
- struct ethtool_flash *flash)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
-
- return mlx5e_ethtool_flash_device(priv, flash);
-}
-
enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_1X = 1 << 0,
MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -241,7 +233,6 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_ethtool_stats = mlx5i_get_ethtool_stats,
.get_ringparam = mlx5i_get_ringparam,
.set_ringparam = mlx5i_set_ringparam,
- .flash_device = mlx5i_flash_device,
.get_channels = mlx5i_get_channels,
.set_channels = mlx5i_set_channels,
.get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 8212bfd05733..e69766393990 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/netdevice.h>
+#include <net/nexthop.h>
#include "lag.h"
#include "lag_mp.h"
#include "mlx5_core.h"
@@ -110,6 +111,8 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ struct fib_nh *fib_nh0, *fib_nh1;
+ unsigned int nhs;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -120,9 +123,11 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
}
/* Handle add/replace event */
- if (fi->fib_nhs == 1) {
+ nhs = fib_info_num_path(fi);
+ if (nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
- struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev;
+ struct fib_nh *nh = fib_info_nh(fi, 0);
+ struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
mlx5_lag_set_port_affinity(ldev, ++i);
@@ -130,14 +135,16 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
}
- if (fi->fib_nhs != 2)
+ if (nhs != 2)
return;
/* Verify next hops are ports of the same hca */
- if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev &&
- fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) &&
- !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev &&
- fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) {
+ fib_nh0 = fib_info_nh(fi, 0);
+ fib_nh1 = fib_info_nh(fi, 1);
+ if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
+ !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
		mlx5_core_warn(ldev->pf[0].dev, "Multipath offload requires two ports of the same HCA\n");
return;
}
@@ -174,7 +181,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
mlx5_lag_set_port_affinity(ldev, i);
}
} else if (event == FIB_EVENT_NH_ADD &&
- fi->fib_nhs == 2) {
+ fib_info_num_path(fi) == 2) {
mlx5_lag_set_port_affinity(ldev, 0);
}
}
@@ -238,6 +245,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct mlx5_fib_event_work *fib_work;
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
+ struct net_device *fib_dev;
struct fib_info *fi;
if (info->family != AF_INET)
@@ -254,8 +262,13 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
fi = fen_info->fi;
- if (fi->fib_dev != ldev->pf[0].netdev &&
- fi->fib_dev != ldev->pf[1].netdev) {
+ if (fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ if (fib_dev != ldev->pf[0].netdev &&
+ fib_dev != ldev->pf[1].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
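
The lag_mp.c hunks above stop dereferencing fib_info internals (fi->fib_nhs,
fi->fib_nh[]) and go through the fib_info_num_path()/fib_info_nh() accessors
instead, so the core can restructure nexthop storage without breaking drivers.
A standalone sketch of the accessor pattern (the types and helpers here are
illustrative stand-ins, not the kernel's):

#include <stdio.h>

struct nh { int ifindex; };
struct fib { int nhs; struct nh nh[2]; };

/* callers use accessors instead of touching struct internals */
static int fib_num_path(const struct fib *fi) { return fi->nhs; }
static const struct nh *fib_nh_at(const struct fib *fi, int i)
{
	return &fi->nh[i];
}

int main(void)
{
	struct fib fi = { 2, { { 3 }, { 4 } } };

	for (int i = 0; i < fib_num_path(&fi); i++)
		printf("nh%d dev=%d\n", i, fib_nh_at(&fi, i)->ifindex);
	return 0;
}
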
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
new file mode 100644
index 000000000000..23361a9ae4fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/kernel.h>
+#include "mlx5_core.h"
+#include "geneve.h"
+
+struct mlx5_geneve {
+ struct mlx5_core_dev *mdev;
+ __be16 opt_class;
+ u8 opt_type;
+ u32 obj_id;
+ struct mutex sync_lock; /* protect GENEVE obj operations */
+ u32 refcount;
+};
+
+static int mlx5_geneve_tlv_option_create(struct mlx5_core_dev *mdev,
+ __be16 class,
+ u8 type,
+ u8 len)
+{
+ u32 in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u64 general_obj_types;
+ void *hdr, *opt;
+ u16 obj_id;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT))
+ return -EINVAL;
+
+ hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
+ opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, geneve_tlv_opt);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+
+ MLX5_SET(geneve_tlv_option, opt, option_class, be16_to_cpu(class));
+ MLX5_SET(geneve_tlv_option, opt, option_type, type);
+ MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return obj_id;
+}
+
+static void mlx5_geneve_tlv_option_destroy(struct mlx5_core_dev *mdev, u16 obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt)
+{
+ int res = 0;
+
+ if (IS_ERR_OR_NULL(geneve))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&geneve->sync_lock);
+
+ if (geneve->refcount) {
+ if (geneve->opt_class == opt->opt_class &&
+ geneve->opt_type == opt->type) {
+ /* We already have TLV options obj allocated */
+ geneve->refcount++;
+ } else {
+ /* A TLV options object is already allocated, but its
+ * parameters do not match the new request.
+ * Only one such object is supported.
+ */
+ mlx5_core_warn(geneve->mdev,
+ "Won't create Geneve TLV opt object with class:type:len = 0x%x:0x%x:%d (another class:type already exists)\n",
+ be16_to_cpu(opt->opt_class),
+ opt->type,
+ opt->length);
+ res = -EOPNOTSUPP;
+ goto unlock;
+ }
+ } else {
+ /* We don't have any TLV options obj allocated */
+
+ res = mlx5_geneve_tlv_option_create(geneve->mdev,
+ opt->opt_class,
+ opt->type,
+ opt->length);
+ if (res < 0) {
+ mlx5_core_warn(geneve->mdev,
+ "Failed creating Geneve TLV opt object class:type:len = 0x%x:0x%x:%d (err=%d)\n",
+ be16_to_cpu(opt->opt_class),
+ opt->type, opt->length, res);
+ goto unlock;
+ }
+ geneve->opt_class = opt->opt_class;
+ geneve->opt_type = opt->type;
+ geneve->obj_id = res;
+ geneve->refcount++;
+ }
+
+unlock:
+ mutex_unlock(&geneve->sync_lock);
+ return res;
+}
+
+void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve)
+{
+ if (IS_ERR_OR_NULL(geneve))
+ return;
+
+ mutex_lock(&geneve->sync_lock);
+ if (--geneve->refcount == 0) {
+ /* We've just removed the last user of Geneve option.
+ * Now delete the object in FW.
+ */
+ mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
+
+ geneve->opt_class = 0;
+ geneve->opt_type = 0;
+ geneve->obj_id = 0;
+ }
+ mutex_unlock(&geneve->sync_lock);
+}
+
+struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_geneve *geneve =
+ kzalloc(sizeof(*geneve), GFP_KERNEL);
+
+ if (!geneve)
+ return ERR_PTR(-ENOMEM);
+ geneve->mdev = mdev;
+ mutex_init(&geneve->sync_lock);
+
+ return geneve;
+}
+
+void mlx5_geneve_destroy(struct mlx5_geneve *geneve)
+{
+ if (IS_ERR_OR_NULL(geneve))
+ return;
+
+ /* Lockless since we are unloading */
+ if (geneve->refcount)
+ mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
+
+ kfree(geneve);
+}
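The helpers above implement a refcounted, single-instance firmware object. A hedged sketch of a call site (offload_geneve_flow() and unoffload_geneve_flow() are hypothetical; error handling trimmed):

/* Sketch: a flow offload path that matches on a Geneve TLV option.
 * Every successful add must later be balanced by a del; a second add
 * with a different class:type fails with -EOPNOTSUPP.
 */
static int offload_geneve_flow(struct mlx5_geneve *geneve,
			       struct geneve_opt *opt)
{
	int err;

	err = mlx5_geneve_tlv_option_add(geneve, opt);
	if (err < 0)
		return err;

	/* ... program the steering rule against the FW object ... */

	return 0;
}

static void unoffload_geneve_flow(struct mlx5_geneve *geneve)
{
	/* Drops the refcount; the FW object is destroyed on last put. */
	mlx5_geneve_tlv_option_del(geneve);
}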
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h
new file mode 100644
index 000000000000..adee0cbba19c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_GENEVE_H__
+#define __MLX5_GENEVE_H__
+
+#include <net/geneve.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_geneve;
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev);
+void mlx5_geneve_destroy(struct mlx5_geneve *geneve);
+
+int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt);
+void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline struct mlx5_geneve *
+mlx5_geneve_create(struct mlx5_core_dev *mdev) { return NULL; }
+static inline void
+mlx5_geneve_destroy(struct mlx5_geneve *geneve) {}
+static inline int
+mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt) { return 0; }
+static inline void
+mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve) {}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+
+#endif /* __MLX5_GENEVE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23d53163ce15..7ec135eaabc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -63,6 +63,7 @@
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
+#include "lib/geneve.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
@@ -821,6 +822,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_clock(dev);
dev->vxlan = mlx5_vxlan_create(dev);
+ dev->geneve = mlx5_geneve_create(dev);
err = mlx5_init_rl_table(dev);
if (err) {
@@ -865,6 +867,7 @@ err_mpfs_cleanup:
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -887,6 +890,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
@@ -1210,6 +1214,25 @@ out:
return err;
}
+static int mlx5_devlink_flash_update(struct devlink *devlink,
+ const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ const struct firmware *fw;
+ int err;
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, file_name, &dev->pdev->dev);
+ if (err)
+ return err;
+
+ err = mlx5_firmware_flash(dev, fw, extack);
+ release_firmware(fw);
+
+ return err;
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -1219,6 +1242,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
+ .flash_update = mlx5_devlink_flash_update,
};
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
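With .flash_update wired into mlx5_devlink_ops, user space can drive the update through the generic devlink interface, for example "devlink dev flash pci/0000:06:00.0 file fw.bin" (device and file names illustrative); the file itself is resolved on the kernel side by request_firmware_direct().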
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 22e69d4813e4..d4dd8c1ae55c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -184,7 +184,8 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
-int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
+int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 86f77456f873..401441aefbcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -106,10 +106,10 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
return 0;
-destroy_flow_table:
- mlx5_destroy_flow_table(ft);
destroy_flow_group:
mlx5_destroy_flow_group(fg);
+destroy_flow_table:
+ mlx5_destroy_flow_table(ft);
free:
kvfree(spec);
kvfree(flow_group_in);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 14c0c62f8e73..c50e74ab02c4 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -5,6 +5,7 @@
#define _MLXFW_H
#include <linux/firmware.h>
+#include <linux/netlink.h>
enum mlxfw_fsm_state {
MLXFW_FSM_STATE_IDLE,
@@ -57,6 +58,10 @@ struct mlxfw_dev_ops {
void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ void (*status_notify)(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes);
};
struct mlxfw_dev {
@@ -67,11 +72,13 @@ struct mlxfw_dev {
#if IS_REACHABLE(CONFIG_MLXFW)
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware);
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
#else
static inline
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 240c027e5f07..67990406cba2 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -39,8 +39,19 @@ static const char * const mlxfw_fsm_state_err_str[] = {
"unknown error"
};
+static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ if (!mlxfw_dev->ops->status_notify)
+ return;
+ mlxfw_dev->ops->status_notify(mlxfw_dev, msg, comp_name,
+ done_bytes, total_bytes);
+}
+
static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- enum mlxfw_fsm_state fsm_state)
+ enum mlxfw_fsm_state fsm_state,
+ struct netlink_ext_ack *extack)
{
enum mlxfw_fsm_state_err fsm_state_err;
enum mlxfw_fsm_state curr_fsm_state;
@@ -57,11 +68,13 @@ retry:
if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
pr_err("Firmware flash failed: %s\n",
mlxfw_fsm_state_err_str[fsm_state_err]);
+ NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
return -EINVAL;
}
if (curr_fsm_state != fsm_state) {
if (--times == 0) {
pr_err("Timeout reached on FSM state change");
+ NL_SET_ERR_MSG_MOD(extack, "Timeout reached on FSM state change");
return -ETIMEDOUT;
}
msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
@@ -76,16 +89,20 @@ retry:
static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
u32 fwhandle,
- struct mlxfw_mfa2_component *comp)
+ struct mlxfw_mfa2_component *comp,
+ struct netlink_ext_ack *extack)
{
u16 comp_max_write_size;
u8 comp_align_bits;
u32 comp_max_size;
+ char comp_name[8];
u16 block_size;
u8 *block_ptr;
u32 offset;
int err;
+ sprintf(comp_name, "%u", comp->index);
+
err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
&comp_max_size, &comp_align_bits,
&comp_max_write_size);
@@ -96,6 +113,7 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
if (comp->data_size > comp_max_size) {
pr_err("Component %d is of size %d which is bigger than limit %d\n",
comp->index, comp->data_size, comp_max_size);
+ NL_SET_ERR_MSG_MOD(extack, "Component is bigger than limit");
return -EINVAL;
}
@@ -103,6 +121,7 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
comp_align_bits);
pr_debug("Component update\n");
+ mlxfw_status_notify(mlxfw_dev, "Updating component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
comp->index,
comp->data_size);
@@ -110,11 +129,13 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
- MLXFW_FSM_STATE_DOWNLOAD);
+ MLXFW_FSM_STATE_DOWNLOAD, extack);
if (err)
goto err_out;
pr_debug("Component download\n");
+ mlxfw_status_notify(mlxfw_dev, "Downloading component",
+ comp_name, 0, comp->data_size);
for (offset = 0;
offset < MLXFW_ALIGN_UP(comp->data_size, comp_align_bits);
offset += comp_max_write_size) {
@@ -126,15 +147,20 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
offset);
if (err)
goto err_out;
+ mlxfw_status_notify(mlxfw_dev, "Downloading component",
+ comp_name, offset + block_size,
+ comp->data_size);
}
pr_debug("Component verify\n");
+ mlxfw_status_notify(mlxfw_dev, "Verifying component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
comp->index);
if (err)
goto err_out;
- err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_out;
return 0;
@@ -145,7 +171,8 @@ err_out:
}
static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- struct mlxfw_mfa2_file *mfa2_file)
+ struct mlxfw_mfa2_file *mfa2_file,
+ struct netlink_ext_ack *extack)
{
u32 component_count;
int err;
@@ -156,6 +183,7 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
&component_count);
if (err) {
pr_err("Could not find device PSID in MFA2 file\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not find device PSID in MFA2 file");
return err;
}
@@ -168,7 +196,7 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
return PTR_ERR(comp);
pr_info("Flashing component type %d\n", comp->index);
- err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp, extack);
mlxfw_mfa2_file_component_put(comp);
if (err)
return err;
@@ -177,7 +205,8 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
}
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxfw_mfa2_file *mfa2_file;
u32 fwhandle;
@@ -185,6 +214,7 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (!mlxfw_mfa2_check(firmware)) {
pr_err("Firmware file is not MFA2\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware file is not MFA2");
return -EINVAL;
}
@@ -193,29 +223,35 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
return PTR_ERR(mfa2_file);
pr_info("Initialize firmware flash process\n");
+ mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process",
+ NULL, 0, 0);
err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
if (err) {
pr_err("Could not lock the firmware FSM\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not lock the firmware FSM");
goto err_fsm_lock;
}
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
- MLXFW_FSM_STATE_LOCKED);
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_state_wait_idle_to_locked;
- err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file);
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file, extack);
if (err)
goto err_flash_components;
pr_debug("Activate image\n");
+ mlxfw_status_notify(mlxfw_dev, "Activating image", NULL, 0, 0);
err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
if (err) {
pr_err("Could not activate the downloaded image\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not activate the downloaded image");
goto err_fsm_activate;
}
- err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_state_wait_activate_to_locked;
@@ -223,6 +259,7 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
pr_info("Firmware flash done.\n");
+ mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0);
mlxfw_mfa2_file_fini(mfa2_file);
return 0;
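The download loop above emits one (done_bytes, total_bytes) pair per written block. A self-contained sketch of the sequence it produces, with illustrative sizes and the alignment handling omitted:

#include <stdio.h>

/* Sketch: emulate the per-block progress notifications for a
 * component of `total` bytes written in chunks of at most
 * `max_write` bytes, mirroring the loop in mlxfw_flash_component().
 */
int main(void)
{
	unsigned int total = 1000, max_write = 256, offset;

	for (offset = 0; offset < total; offset += max_write) {
		unsigned int block = total - offset < max_write ?
				     total - offset : max_write;

		printf("Downloading component: %u/%u bytes\n",
		       offset + block, total);
	}
	return 0;
}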
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 6ee6de7f0160..1c4ef8ed1706 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1003,6 +1003,20 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
return err;
}
+static int mlxsw_devlink_flash_update(struct devlink *devlink,
+ const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->flash_update)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->flash_update(mlxsw_core, file_name,
+ component, extack);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1019,6 +1033,7 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
.info_get = mlxsw_devlink_info_get,
+ .flash_update = mlxsw_devlink_flash_update,
};
static int
@@ -1098,6 +1113,12 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_register_params;
}
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+ }
+
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -1107,22 +1128,17 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
- if (err)
- goto err_driver_init;
- }
-
if (mlxsw_driver->params_register && !reload)
devlink_params_publish(devlink);
return 0;
-err_driver_init:
- mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
+err_driver_init:
if (mlxsw_driver->params_unregister && !reload)
mlxsw_driver->params_unregister(mlxsw_core);
err_register_params:
@@ -1187,10 +1203,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (mlxsw_core->driver->params_unregister && !reload)
devlink_params_unpublish(devlink);
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
if (mlxsw_core->driver->params_unregister && !reload)
mlxsw_core->driver->params_unregister(mlxsw_core);
if (!reload)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e3832cb5bdda..a44ad0fb9477 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -284,6 +284,9 @@ struct mlxsw_driver {
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
+ int (*flash_update)(struct mlxsw_core *mlxsw_core,
+ const char *file_name, const char *component,
+ struct netlink_ext_ack *extack);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 72539a9a3847..d2c7ce67c300 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -92,33 +92,20 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
u16 temp;
} temp_thresh;
char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
- char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
- u16 module_temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned int module_temp;
bool qsfp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
- err = mlxsw_reg_query(core, MLXSW_REG(mtbr), mtbr_pl);
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
+ false, false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
-
- /* Don't read temperature thresholds for module with no valid info. */
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &module_temp, NULL);
- switch (module_temp) {
- case MLXSW_REG_MTBR_BAD_SENS_INFO: /* fall-through */
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA:
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, NULL);
+ if (!module_temp) {
*temp = 0;
return 0;
- default:
- /* Do not consider thresholds for zero temperature. */
- if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
- *temp = 0;
- return 0;
- }
- break;
}
/* Read Free Side Device Temperature Thresholds from page 03h
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 496dc904c5ed..056e3f55ae6c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -23,6 +23,14 @@ struct mlxsw_hwmon_attr {
char name[32];
};
+static int mlxsw_hwmon_get_attr_index(int index, int count)
+{
+ if (index >= count)
+ return index % count + MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+
+ return index;
+}
+
struct mlxsw_hwmon {
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -33,6 +41,7 @@ struct mlxsw_hwmon {
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
u8 sensor_count;
+ u8 module_sensor_count;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -44,10 +53,12 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned int temp;
+ int index;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index,
- false, false);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -66,10 +77,12 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned int temp_max;
+ int index;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index,
- false, false);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -88,6 +101,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
+ int index;
int err;
err = kstrtoul(buf, 10, &val);
@@ -96,7 +110,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
if (val != 1)
return -EINVAL;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, true, true);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
@@ -198,38 +214,18 @@ static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
- u16 temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned int temp;
u8 module;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
- err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
- if (err) {
- dev_err(dev, "Failed to query module temperature sensor\n");
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
+ false, false);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
return err;
- }
-
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
- /* Update status and temperature cache. */
- switch (temp) {
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA:
- temp = 0;
- break;
- case MLXSW_REG_MTBR_BAD_SENS_INFO:
- /* Untrusted cable is connected. Reading temperature from its
- * sensor is faulty.
- */
- temp = 0;
- break;
- default:
- temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
- break;
- }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
return sprintf(buf, "%u\n", temp);
}
@@ -333,6 +329,20 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
mlwsw_hwmon_attr->type_index);
}
+static ssize_t
+mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ int index = mlwsw_hwmon_attr->type_index -
+ mlxsw_hwmon->module_sensor_count + 1;
+
+ return sprintf(buf, "gearbox %03u\n", index);
+}
+
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
@@ -345,6 +355,7 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -428,6 +439,13 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_label", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_gbox_temp_label_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_label", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -556,6 +574,54 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
index, index);
index++;
}
+ mlxsw_hwmon->module_sensor_count = index;
+
+ return 0;
+}
+
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ int index, max_index, sensor_index;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u8 gbox_num;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL);
+ if (!gbox_num)
+ return 0;
+
+ index = mlxsw_hwmon->module_sensor_count;
+ max_index = mlxsw_hwmon->module_sensor_count + gbox_num;
+ while (index < max_index) {
+ sensor_index = index % mlxsw_hwmon->module_sensor_count +
+ MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+ mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core,
+ MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n",
+ sensor_index);
+ return err;
+ }
+ mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
+ index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ index, index);
+ index++;
+ }
return 0;
}
@@ -586,6 +652,10 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_temp_module_init;
+ err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ if (err)
+ goto err_temp_gearbox_init;
+
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
@@ -602,6 +672,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
err_hwmon_register:
+err_temp_gearbox_init:
err_temp_module_init:
err_fans_init:
err_temp_init:
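Because the gearbox attributes reuse the generic temp/temp_max/temp_rst handlers, mlxsw_hwmon_get_attr_index() folds the hwmon attribute index back into MTMP sensor space. A worked sketch of that mapping under assumed counts (attr_to_sensor_index() is hypothetical; 256 stands in for MLXSW_REG_MTMP_GBOX_INDEX_MIN):

/* Sketch: with module_sensor_count == 10, attribute indices 0..9
 * address the ASIC and module sensors directly, while 10, 11, ...
 * map to gearbox sensors 256, 257, ...
 */
static int attr_to_sensor_index(int index, int count)
{
	if (index >= count)
		return index % count + 256; /* MLXSW_REG_MTMP_GBOX_INDEX_MIN */
	return index;
}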
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d3e851e7ca72..cfab0e330a47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -449,39 +449,31 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
struct mlxsw_thermal_module *tz = tzdev->devdata;
struct mlxsw_thermal *thermal = tz->parent;
struct device *dev = thermal->bus_info->dev;
- char mtbr_pl[MLXSW_REG_MTBR_LEN];
- u16 temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned int temp;
int err;
/* Read module temperature. */
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX +
- tz->module, 1);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtbr), mtbr_pl);
- if (err)
- return err;
-
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
- /* Update temperature. */
- switch (temp) {
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA: /* fall-through */
- case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN +
+ tz->module, false, false);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ /* Do not return an error - a broken module sensor
+ * would otherwise cause error message flooding.
+ */
temp = 0;
- break;
- default:
- temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
- /* Reset all trip point. */
- mlxsw_thermal_module_trips_reset(tz);
- /* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core,
- tz);
- if (err)
- return err;
- break;
+ *p_temp = (int) temp;
+ return 0;
}
-
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
*p_temp = (int) temp;
+
+ if (!temp)
+ return 0;
+
+ /* Update trip points. */
+ mlxsw_thermal_module_trips_update(dev, thermal->core, tz);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 06aea1999518..95f408d0e103 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -43,11 +43,10 @@
#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28)
#define MLXSW_I2C_MBOX_SIZE 20
#define MLXSW_I2C_MBOX_OUT_PARAM_OFF 12
-#define MLXSW_I2C_MAX_BUFF_SIZE 32
#define MLXSW_I2C_MBOX_OFFSET_BITS 20
#define MLXSW_I2C_MBOX_SIZE_BITS 12
#define MLXSW_I2C_ADDR_BUF_SIZE 4
-#define MLXSW_I2C_BLK_MAX 32
+#define MLXSW_I2C_BLK_DEF 32
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
@@ -62,6 +61,7 @@
* @dev: I2C device;
* @core: switch core pointer;
* @bus_info: bus info block;
+ * @block_size: maximum block size allowed to be passed to the underlying layer;
*/
struct mlxsw_i2c {
struct {
@@ -74,6 +74,7 @@ struct mlxsw_i2c {
struct device *dev;
struct mlxsw_core *core;
struct mlxsw_bus_info bus_info;
+ u16 block_size;
};
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
@@ -315,20 +316,26 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
struct i2c_client *client = to_i2c_client(dev);
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
- u8 tran_buf[MLXSW_I2C_MAX_BUFF_SIZE + MLXSW_I2C_ADDR_BUF_SIZE];
int off = mlxsw_i2c->cmd.mb_off_in, chunk_size, i, j;
unsigned long end;
+ u8 *tran_buf;
struct i2c_msg write_tran =
- MLXSW_I2C_WRITE_MSG(client, tran_buf, MLXSW_I2C_PUSH_CMD_SIZE);
+ MLXSW_I2C_WRITE_MSG(client, NULL, MLXSW_I2C_PUSH_CMD_SIZE);
int err;
+ tran_buf = kmalloc(mlxsw_i2c->block_size + MLXSW_I2C_ADDR_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tran_buf)
+ return -ENOMEM;
+
+ write_tran.buf = tran_buf;
for (i = 0; i < num; i++) {
- chunk_size = (in_mbox_size > MLXSW_I2C_BLK_MAX) ?
- MLXSW_I2C_BLK_MAX : in_mbox_size;
+ chunk_size = (in_mbox_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : in_mbox_size;
write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
mlxsw_i2c_set_slave_addr(tran_buf, off);
memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
- MLXSW_I2C_BLK_MAX * i, chunk_size);
+ mlxsw_i2c->block_size * i, chunk_size);
j = 0;
end = jiffies + timeout;
@@ -342,9 +349,10 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
(j++ < MLXSW_I2C_RETRY));
if (err != 1) {
- if (!err)
+ if (!err) {
err = -EIO;
- return err;
+ goto mlxsw_i2c_write_exit;
+ }
}
off += chunk_size;
@@ -355,24 +363,27 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 0);
if (err) {
dev_err(&client->dev, "Could not start transaction");
- return -EIO;
+ err = -EIO;
+ goto mlxsw_i2c_write_exit;
}
/* Wait until go bit is cleared. */
err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, p_status);
if (err) {
dev_err(&client->dev, "HW semaphore is not released");
- return err;
+ goto mlxsw_i2c_write_exit;
}
/* Validate transaction completion status. */
if (*p_status) {
dev_err(&client->dev, "Bad transaction completion status %x\n",
*p_status);
- return -EIO;
+ err = -EIO;
}
- return 0;
+mlxsw_i2c_write_exit:
+ kfree(tran_buf);
+ return err;
}
/* Routine executes I2C command. */
@@ -395,8 +406,8 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
if (in_mbox) {
reg_size = mlxsw_i2c_get_reg_size(in_mbox);
- num = reg_size / MLXSW_I2C_BLK_MAX;
- if (reg_size % MLXSW_I2C_BLK_MAX)
+ num = reg_size / mlxsw_i2c->block_size;
+ if (reg_size % mlxsw_i2c->block_size)
num++;
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
@@ -416,7 +427,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
} else {
/* No input mailbox in the case of an initialization query command. */
reg_size = MLXSW_I2C_MAX_DATA_SIZE;
- num = reg_size / MLXSW_I2C_BLK_MAX;
+ num = reg_size / mlxsw_i2c->block_size;
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
@@ -432,8 +443,8 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
/* Send read transaction to get output mailbox content. */
read_tran[1].buf = out_mbox;
for (i = 0; i < num; i++) {
- chunk_size = (reg_size > MLXSW_I2C_BLK_MAX) ?
- MLXSW_I2C_BLK_MAX : reg_size;
+ chunk_size = (reg_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : reg_size;
read_tran[1].len = chunk_size;
mlxsw_i2c_set_slave_addr(tran_buf, off);
@@ -509,8 +520,20 @@ mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (!mbox)
return -ENOMEM;
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto mbox_put;
+
+ mlxsw_i2c->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
}
@@ -534,6 +557,7 @@ static const struct mlxsw_bus mlxsw_i2c_bus = {
static int mlxsw_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
struct mlxsw_i2c *mlxsw_i2c;
u8 status;
int err;
@@ -542,6 +566,22 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
if (!mlxsw_i2c)
return -ENOMEM;
+ if (quirks) {
+ if ((quirks->max_read_len &&
+ quirks->max_read_len < MLXSW_I2C_BLK_DEF) ||
+ (quirks->max_write_len &&
+ quirks->max_write_len < MLXSW_I2C_BLK_DEF)) {
+ dev_err(&client->dev, "Insufficient transaction buffer length\n");
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
+ min_t(u16, quirks->max_read_len,
+ quirks->max_write_len));
+ } else {
+ mlxsw_i2c->block_size = MLXSW_I2C_BLK_DEF;
+ }
+
i2c_set_clientdata(client, mlxsw_i2c);
mutex_init(&mlxsw_i2c->cmd.lock);
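A standalone sketch of the resulting transfer size under different adapter quirks (illustrative values; in the I2C core a quirk limit of 0 means unlimited, in which case the expression above falls back to the 32-byte default):

#include <stdio.h>

/* Sketch: mirror the probe-time block size selection. */
static unsigned short pick_block_size(unsigned short max_read_len,
				      unsigned short max_write_len)
{
	const unsigned short blk_def = 32; /* MLXSW_I2C_BLK_DEF */
	unsigned short m = max_read_len < max_write_len ?
			   max_read_len : max_write_len;

	return m > blk_def ? m : blk_def;
}

int main(void)
{
	printf("%u\n", pick_block_size(64, 64));  /* 64 */
	printf("%u\n", pick_block_size(64, 128)); /* 64 */
	printf("%u\n", pick_block_size(0, 128));  /* 32: the 0 wins the min */
	return 0;
}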
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index cf2114273b72..471b0ca6d69a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -67,6 +67,23 @@ static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
+static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ strlcpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ sizeof(drvinfo->driver));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_m->bus_info->fw_rev.major,
+ mlxsw_m->bus_info->fw_rev.minor,
+ mlxsw_m->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
static int mlxsw_m_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
@@ -88,6 +105,7 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
}
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index e8002bfc1e8f..7348c5a5ad6a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -8039,13 +8039,15 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
+#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
* Sensor index to access.
* 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially
* (module 0 is mapped to sensor_index 64).
* Access: Index
*/
-MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7);
+MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 12);
/* Convert to milli degrees Celsius */
#define MLXSW_REG_MTMP_TEMP_TO_MC(val) (val * 125)
@@ -8107,7 +8109,7 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
bool max_temp_enable,
bool max_temp_reset)
{
@@ -8156,7 +8158,7 @@ MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
* Access: Index
*/
-MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 7);
+MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 12);
/* reg_mtbr_num_rec
* Request: Number of records to read
@@ -8183,7 +8185,7 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u8 base_sensor_index,
+static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
@@ -9043,6 +9045,57 @@ static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
}
+/* MGPIR - Management General Peripheral Information Register
+ * ----------------------------------------------------------
+ * The MGPIR register allows software to query general hardware and
+ * firmware information about peripheral entities.
+ */
+#define MLXSW_REG_MGPIR_ID 0x9100
+#define MLXSW_REG_MGPIR_LEN 0xA0
+
+MLXSW_REG_DEFINE(mgpir, MLXSW_REG_MGPIR_ID, MLXSW_REG_MGPIR_LEN);
+
+enum mlxsw_reg_mgpir_device_type {
+ MLXSW_REG_MGPIR_DEVICE_TYPE_NONE,
+ MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
+};
+
+/* device_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
+
+/* devices_per_flash
+ * Number of devices of device_type per flash (a flash can be shared by a few devices).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
+
+/* num_of_devices
+ * Number of devices of device_type.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+
+static inline void mlxsw_reg_mgpir_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mgpir, payload);
+}
+
+static inline void
+mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
+ enum mlxsw_reg_mgpir_device_type *device_type,
+ u8 *devices_per_flash)
+{
+ if (num_of_devices)
+ *num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
+ if (device_type)
+ *device_type = mlxsw_reg_mgpir_device_type_get(payload);
+ if (devices_per_flash)
+ *devices_per_flash =
+ mlxsw_reg_mgpir_devices_per_flash_get(payload);
+}
+
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
* The TNGCR register is used for setting up the NVE Tunneling configuration.
@@ -10058,6 +10111,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
MLXSW_REG(mprs),
+ MLXSW_REG(mgpir),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
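A minimal query sketch for the new register, following the pack/query/unpack pattern used throughout the driver (query_gearbox_count() is a hypothetical helper; compare mlxsw_hwmon_gearbox_init() above):

/* Sketch: read the number of gearbox dies behind the ASIC. */
static int query_gearbox_count(struct mlxsw_core *core, u8 *p_gbox_num)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	int err;

	mlxsw_reg_mgpir_pack(mgpir_pl);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;

	mlxsw_reg_mgpir_unpack(mgpir_pl, p_gbox_num, NULL, NULL);
	return 0;
}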
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index dfe6b44baf63..417e7c9273ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -294,6 +294,19 @@ static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
+static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+
+ devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
+ msg, comp_name,
+ done_bytes, total_bytes);
+}
+
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.component_query = mlxsw_sp_component_query,
.fsm_lock = mlxsw_sp_fsm_lock,
@@ -303,11 +316,13 @@ static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.fsm_activate = mlxsw_sp_fsm_activate,
.fsm_query_state = mlxsw_sp_fsm_query_state,
.fsm_cancel = mlxsw_sp_fsm_cancel,
- .fsm_release = mlxsw_sp_fsm_release
+ .fsm_release = mlxsw_sp_fsm_release,
+ .status_notify = mlxsw_sp_status_notify,
};
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
.mlxfw_dev = {
@@ -320,7 +335,10 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_core_fw_flash_start(mlxsw_sp->core);
- err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
+ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
+ firmware, extack);
+ devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
mlxsw_core_fw_flash_end(mlxsw_sp->core);
return err;
@@ -374,7 +392,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
return err;
}
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
@@ -388,6 +406,27 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
+ const char *file_name, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct firmware *firmware;
+ int err;
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&firmware, file_name,
+ mlxsw_sp->bus_info->dev);
+ if (err)
+ return err;
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
+ release_firmware(firmware);
+
+ return err;
+}
+
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -3159,31 +3198,6 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_flash_device(struct net_device *dev,
- struct ethtool_flash *flash)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- const struct firmware *firmware;
- int err;
-
- if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
- return -EOPNOTSUPP;
-
- dev_hold(dev);
- rtnl_unlock();
-
- err = request_firmware_direct(&firmware, flash->data, &dev->dev);
- if (err)
- goto out;
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
- release_firmware(firmware);
-out:
- rtnl_lock();
- dev_put(dev);
- return err;
-}
-
static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
@@ -3224,7 +3238,6 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_sset_count = mlxsw_sp_port_get_sset_count,
.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
- .flash_device = mlxsw_sp_flash_device,
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
};
@@ -4889,6 +4902,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -4917,6 +4931,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
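Because mlxsw_sp_status_notify() forwards into devlink_flash_update_status_notify(), the component name and byte counters emitted during a flash become visible to user space (for example via the iproute2 "devlink monitor" command) instead of only through kernel log messages.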
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 1cda8a248b12..23f17ea52061 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
+#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
@@ -2886,7 +2887,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
@@ -2958,7 +2959,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
val ^= dev->ifindex;
}
@@ -3816,23 +3817,25 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
- const struct fib_info *fi)
+ struct fib_info *fi)
{
- return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK ||
- mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
+ const struct fib_nh *nh = fib_info_nh(fi, 0);
+
+ return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
+ unsigned int nhs = fib_info_num_path(fi);
struct mlxsw_sp_nexthop_group *nh_grp;
struct mlxsw_sp_nexthop *nh;
struct fib_nh *fib_nh;
int i;
int err;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs),
- GFP_KERNEL);
+ nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
nh_grp->priv = fi;
@@ -3840,11 +3843,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
nh_grp->neigh_tbl = &arp_tbl;
nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
- nh_grp->count = fi->fib_nhs;
+ nh_grp->count = nhs;
fib_info_hold(fi);
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
- fib_nh = &fi->fib_nh[i];
+ fib_nh = fib_info_nh(fi, i);
err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
if (err)
goto err_nexthop4_init;
@@ -3960,9 +3963,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.fib_nh_gw6))
+ &rt->fib6_nh->fib_nh_gw6))
return nh;
continue;
}
@@ -4022,13 +4025,13 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
@@ -4050,7 +4053,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4282,9 +4285,9 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
- struct net_device *dev = fen_info->fi->fib_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4928,7 +4931,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family;
+ return !(rt->fib6_flags & RTF_ADDRCONF) &&
+ rt->fib6_nh->fib_nh_gw_family;
}
static struct fib6_info *
@@ -4987,8 +4991,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.fib_nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
+ return rt->fib6_nh->fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
@@ -4998,7 +5002,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+ struct net_device *dev = rt->fib6_nh->fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -5041,11 +5045,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+ struct net_device *dev = rt->fib6_nh->fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.fib_nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh->fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5068,7 +5072,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->fib6_nh.fib_nh_gw_family ||
+ return rt->fib6_nh->fib_nh_gw_family ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -6118,6 +6122,20 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ } else if (info->family == AF_INET6) {
+ struct fib6_entry_notifier_info *fen6_info;
+
+ fen6_info = container_of(info,
+ struct fib6_entry_notifier_info,
+ info);
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
}
break;
}
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index cb52a3b128ae..9a36c26095c8 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o
+mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 02ad11e0b0d8..b71e4ecbe469 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -22,6 +22,7 @@
#include <net/switchdev.h>
#include "ocelot.h"
+#include "ocelot_ace.h"
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
@@ -130,6 +131,13 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
+static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port)
+{
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
+ ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
+ ANA_PORT_VCAP_S2_CFG, port->chip_port);
+}
+
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -884,6 +892,13 @@ static int ocelot_set_features(struct net_device *dev,
struct ocelot_port *port = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ port->tc.offload_cnt) {
+ netdev_err(dev,
+ "Cannot disable HW TC offload while offloads active\n");
+ return -EBUSY;
+ }
+
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
ocelot_vlan_mode(port, features);
@@ -917,6 +932,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
+ .ndo_setup_tc = ocelot_setup_tc,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1636,8 +1652,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
+ NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
@@ -1653,6 +1670,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
/* Basic L2 initialization */
ocelot_vlan_port_apply(ocelot, ocelot_port);
+ /* Enable VCAP lookups */
+ ocelot_vcap_enable(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1687,6 +1707,7 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
+ ocelot_ace_init(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
@@ -1799,6 +1820,7 @@ void ocelot_deinit(struct ocelot *ocelot)
{
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
+ ocelot_ace_deinit();
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 541fe41e60b0..f7eeb4806897 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -22,6 +22,7 @@
#include "ocelot_rew.h"
#include "ocelot_sys.h"
#include "ocelot_qs.h"
+#include "ocelot_tc.h"
#define PGID_AGGR 64
#define PGID_SRC 80
@@ -68,6 +69,7 @@ enum ocelot_target {
QSYS,
REW,
SYS,
+ S2,
HSIO,
TARGET_MAX,
};
@@ -334,6 +336,13 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
+ S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
+ S2_CORE_MV_CFG,
+ S2_CACHE_ENTRY_DAT,
+ S2_CACHE_MASK_DAT,
+ S2_CACHE_ACTION_DAT,
+ S2_CACHE_CNT_DAT,
+ S2_CACHE_TG_DAT,
};
enum ocelot_regfield {
@@ -454,6 +463,8 @@ struct ocelot_port {
phy_interface_t phy_mode;
struct phy *serdes;
+
+ struct ocelot_port_tc tc;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
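The new S2 entries in enum ocelot_reg follow the driver's existing register-ID scheme: the target block is encoded in the high bits (S2 << TARGET_OFFSET) and successive enum values supply the per-target index used to look up the offset in that target's regmap. A standalone sketch of the encoding, assuming a TARGET_OFFSET of 24 and made-up target IDs:

#include <stdint.h>
#include <stdio.h>

#define TARGET_OFFSET 24			/* assumed; the real value lives in ocelot.h */
#define REG_MASK ((1u << TARGET_OFFSET) - 1)

enum { SYS_T = 1, S2_T = 2 };			/* illustrative target IDs */

enum demo_reg {
	DEMO_S2_CORE_UPDATE_CTRL = S2_T << TARGET_OFFSET,
	DEMO_S2_CORE_MV_CFG,			/* index 1 within target S2 */
};

int main(void)
{
	uint32_t reg = DEMO_S2_CORE_MV_CFG;

	/* Recover the target block and per-target index from one u32 */
	printf("target=%u index=%u\n",
	       (unsigned)(reg >> TARGET_OFFSET), (unsigned)(reg & REG_MASK));
	return 0;
}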
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
new file mode 100644
index 000000000000..f74b98f7d8d1
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <linux/iopoll.h>
+#include <linux/proc_fs.h>
+
+#include "ocelot_ace.h"
+#include "ocelot_vcap.h"
+#include "ocelot_s2.h"
+
+#define OCELOT_POLICER_DISCARD 0x17f
+
+static struct ocelot_acl_block *acl_block;
+
+struct vcap_props {
+ const char *name; /* Symbolic name */
+ u16 tg_width; /* Type-group width (in bits) */
+ u16 sw_count; /* Sub word count */
+ u16 entry_count; /* Entry count */
+ u16 entry_words; /* Number of entry words */
+ u16 entry_width; /* Entry width (in bits) */
+ u16 action_count; /* Action count */
+ u16 action_words; /* Number of action words */
+ u16 action_width; /* Action width (in bits) */
+ u16 action_type_width; /* Action type width (in bits) */
+ struct {
+ u16 width; /* Action width for this type (in bits) */
+ u16 count; /* Action type sub word count */
+ } action_table[2];
+ u16 counter_words; /* Number of counter words */
+ u16 counter_width; /* Counter width (in bits) */
+};
+
+#define ENTRY_WIDTH 32
+#define BITS_TO_32BIT(x) (1 + (((x) - 1) / ENTRY_WIDTH))
+
+static const struct vcap_props vcap_is2 = {
+ .name = "IS2",
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VCAP_IS2_CNT,
+ .entry_words = BITS_TO_32BIT(VCAP_IS2_ENTRY_WIDTH),
+ .entry_width = VCAP_IS2_ENTRY_WIDTH,
+ .action_count = (VCAP_IS2_CNT + VCAP_PORT_CNT + 2),
+ .action_words = BITS_TO_32BIT(VCAP_IS2_ACTION_WIDTH),
+ .action_width = (VCAP_IS2_ACTION_WIDTH),
+ .action_type_width = 1,
+ .action_table = {
+ {
+ .width = (IS2_AO_ACL_ID + IS2_AL_ACL_ID),
+ .count = 2
+ },
+ {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = BITS_TO_32BIT(4 * ENTRY_WIDTH),
+ .counter_width = ENTRY_WIDTH,
+};
+
+enum vcap_sel {
+ VCAP_SEL_ENTRY = 0x1,
+ VCAP_SEL_ACTION = 0x2,
+ VCAP_SEL_COUNTER = 0x4,
+ VCAP_SEL_ALL = 0x7,
+};
+
+enum vcap_cmd {
+ VCAP_CMD_WRITE = 0, /* Copy from Cache to TCAM */
+ VCAP_CMD_READ = 1, /* Copy from TCAM to Cache */
+ VCAP_CMD_MOVE_UP = 2, /* Move <count> up */
+ VCAP_CMD_MOVE_DOWN = 3, /* Move <count> down */
+ VCAP_CMD_INITIALIZE = 4, /* Write all (from cache) */
+};
+
+#define VCAP_ENTRY_WIDTH 12 /* Max entry width (32bit words) */
+#define VCAP_COUNTER_WIDTH 4 /* Max counter width (32bit words) */
+
+struct vcap_data {
+ u32 entry[VCAP_ENTRY_WIDTH]; /* ENTRY_DAT */
+ u32 mask[VCAP_ENTRY_WIDTH]; /* MASK_DAT */
+ u32 action[VCAP_ENTRY_WIDTH]; /* ACTION_DAT */
+ u32 counter[VCAP_COUNTER_WIDTH]; /* CNT_DAT */
+ u32 tg; /* TG_DAT */
+ u32 type; /* Action type */
+ u32 tg_sw; /* Current type-group */
+ u32 cnt; /* Current counter */
+ u32 key_offset; /* Current entry offset */
+ u32 action_offset; /* Current action offset */
+ u32 counter_offset; /* Current counter offset */
+ u32 tg_value; /* Current type-group value */
+ u32 tg_mask; /* Current type-group mask */
+};
+
+static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
+{
+ return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
+}
+
+static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
+{
+ u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
+ S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
+
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2.entry_count)
+ return;
+
+ if (!(sel & VCAP_SEL_ENTRY))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
+
+ if (!(sel & VCAP_SEL_ACTION))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
+
+ if (!(sel & VCAP_SEL_COUNTER))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+
+ ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
+ readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
+ (value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
+ 10, 100000);
+}
+
+/* Convert from 0-based row to VCAP entry row and run command */
+static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
+{
+ vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
+}
+
+static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i;
+
+ for (i = 0; i < vcap_is2.entry_words; i++) {
+ ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
+ ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ }
+ ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
+}
+
+static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i;
+
+ for (i = 0; i < vcap_is2.entry_words; i++) {
+ data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
+ /* Invert mask */
+ data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
+ }
+ data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
+}
+
+static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i, width, mask;
+
+ /* Encode action type */
+ width = vcap_is2.action_type_width;
+ if (width) {
+ mask = GENMASK(width, 0);
+ data->action[0] = ((data->action[0] & ~mask) | data->type);
+ }
+
+ for (i = 0; i < vcap_is2.action_words; i++)
+ ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
+
+ for (i = 0; i < vcap_is2.counter_words; i++)
+ ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
+}
+
+static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i, width;
+
+ for (i = 0; i < vcap_is2.action_words; i++)
+ data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
+
+ for (i = 0; i < vcap_is2.counter_words; i++)
+ data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
+
+ /* Extract action type */
+ width = vcap_is2.action_type_width;
+ data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
+}
+
+/* Calculate offsets for entry */
+static void is2_data_get(struct vcap_data *data, int ix)
+{
+ u32 i, col, offset, count, cnt, base, width = vcap_is2.tg_width;
+
+ count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
+ col = (ix % 2);
+ cnt = (vcap_is2.sw_count / count);
+ base = (vcap_is2.sw_count - col * cnt - cnt);
+ data->tg_value = 0;
+ data->tg_mask = 0;
+ for (i = 0; i < cnt; i++) {
+ offset = ((base + i) * width);
+ data->tg_value |= (data->tg_sw << offset);
+ data->tg_mask |= GENMASK(offset + width - 1, offset);
+ }
+
+ /* Calculate key/action/counter offsets */
+ col = (count - col - 1);
+ data->key_offset = (base * vcap_is2.entry_width) / vcap_is2.sw_count;
+ data->counter_offset = (cnt * col * vcap_is2.counter_width);
+ i = data->type;
+ width = vcap_is2.action_table[i].width;
+ cnt = vcap_is2.action_table[i].count;
+ data->action_offset =
+ (((cnt * col * width) / count) + vcap_is2.action_type_width);
+}
+
+static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
+{
+ u32 i, v, m;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (value & (1 << i))
+ v |= m;
+ else
+ v &= ~m;
+ data[offset / ENTRY_WIDTH] = v;
+ }
+}
+
+static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
+{
+ u32 i, v, m, value = 0;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (v & m)
+ value |= (1 << i);
+ }
+ return value;
+}
+
+static void vcap_key_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
+{
+ vcap_data_set(data->entry, offset + data->key_offset, width, value);
+ vcap_data_set(data->mask, offset + data->key_offset, width, mask);
+}
+
+static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
+ u8 *msk, u32 count)
+{
+ u32 i, j, n = 0, value = 0, mask = 0;
+
+ /* Data wider than 32 bits is split into chunks of at most 32 bits.
+ * The 32 LSB of the data are written to the 32 MSB of the TCAM.
+ */
+ offset += (count * 8);
+ for (i = 0; i < count; i++) {
+ j = (count - i - 1);
+ value += (val[j] << n);
+ mask += (msk[j] << n);
+ n += 8;
+ if (n == ENTRY_WIDTH || (i + 1) == count) {
+ offset -= n;
+ vcap_key_set(data, offset, n, value, mask);
+ n = 0;
+ value = 0;
+ mask = 0;
+ }
+ }
+}
+
+static void vcap_key_l4_port_set(struct vcap_data *data, u32 offset,
+ struct ocelot_vcap_udp_tcp *port)
+{
+ vcap_key_set(data, offset, 16, port->value, port->mask);
+}
+
+static void vcap_key_bit_set(struct vcap_data *data, u32 offset,
+ enum ocelot_vcap_bit val)
+{
+ vcap_key_set(data, offset, 1, val == OCELOT_VCAP_BIT_1 ? 1 : 0,
+ val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
+}
+
+#define VCAP_KEY_SET(fld, val, msk) \
+ vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, val, msk)
+#define VCAP_KEY_ANY_SET(fld) \
+ vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, 0, 0)
+#define VCAP_KEY_BIT_SET(fld, val) vcap_key_bit_set(&data, IS2_HKO_##fld, val)
+#define VCAP_KEY_BYTES_SET(fld, val, msk) \
+ vcap_key_bytes_set(&data, IS2_HKO_##fld, val, msk, IS2_HKL_##fld / 8)
+
+static void vcap_action_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value)
+{
+ vcap_data_set(data->action, offset + data->action_offset, width, value);
+}
+
+#define VCAP_ACT_SET(fld, val) \
+ vcap_action_set(data, IS2_AO_##fld, IS2_AL_##fld, val)
+
+static void is2_action_set(struct vcap_data *data,
+ enum ocelot_ace_action action)
+{
+ switch (action) {
+ case OCELOT_ACL_ACTION_DROP:
+ VCAP_ACT_SET(PORT_MASK, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x1);
+ VCAP_ACT_SET(POLICE_ENA, 0x1);
+ VCAP_ACT_SET(POLICE_IDX, OCELOT_POLICER_DISCARD);
+ VCAP_ACT_SET(CPU_QU_NUM, 0x0);
+ VCAP_ACT_SET(CPU_COPY_ENA, 0x0);
+ break;
+ case OCELOT_ACL_ACTION_TRAP:
+ VCAP_ACT_SET(PORT_MASK, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x0);
+ VCAP_ACT_SET(POLICE_ENA, 0x0);
+ VCAP_ACT_SET(POLICE_IDX, 0x0);
+ VCAP_ACT_SET(CPU_QU_NUM, 0x0);
+ VCAP_ACT_SET(CPU_COPY_ENA, 0x1);
+ break;
+ }
+}
+
+static void is2_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_ace_rule *ace)
+{
+ u32 val, msk, type, type_mask = 0xf, i, count;
+ struct ocelot_ace_vlan *tag = &ace->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = (ix / 2);
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, &data);
+ vcap_cache2action(ocelot, &data);
+
+ data.tg_sw = VCAP_TG_HALF;
+ is2_data_get(&data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (ace->prio != 0)
+ data.tg |= data.tg_value;
+
+ data.type = IS2_ACTION_TYPE_NORMAL;
+
+ VCAP_KEY_ANY_SET(PAG);
+ VCAP_KEY_SET(IGR_PORT_MASK, 0, ~BIT(ace->chip_port));
+ VCAP_KEY_BIT_SET(FIRST, OCELOT_VCAP_BIT_1);
+ VCAP_KEY_BIT_SET(HOST_MATCH, OCELOT_VCAP_BIT_ANY);
+ VCAP_KEY_BIT_SET(L2_MC, ace->dmac_mc);
+ VCAP_KEY_BIT_SET(L2_BC, ace->dmac_bc);
+ VCAP_KEY_BIT_SET(VLAN_TAGGED, tag->tagged);
+ VCAP_KEY_SET(VID, tag->vid.value, tag->vid.mask);
+ VCAP_KEY_SET(PCP, tag->pcp.value[0], tag->pcp.mask[0]);
+ VCAP_KEY_BIT_SET(DEI, tag->dei);
+
+ switch (ace->type) {
+ case OCELOT_ACE_TYPE_ETYPE: {
+ struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
+
+ type = IS2_TYPE_ETYPE;
+ VCAP_KEY_BYTES_SET(L2_DMAC, etype->dmac.value,
+ etype->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, etype->smac.value,
+ etype->smac.mask);
+ VCAP_KEY_BYTES_SET(MAC_ETYPE_ETYPE, etype->etype.value,
+ etype->etype.mask);
+ VCAP_KEY_ANY_SET(MAC_ETYPE_L2_PAYLOAD); /* Clear unused bits */
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ETYPE_L2_PAYLOAD,
+ etype->data.value, etype->data.mask, 2);
+ break;
+ }
+ case OCELOT_ACE_TYPE_LLC: {
+ struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
+
+ type = IS2_TYPE_LLC;
+ VCAP_KEY_BYTES_SET(L2_DMAC, llc->dmac.value, llc->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, llc->smac.value, llc->smac.mask);
+ for (i = 0; i < 4; i++) {
+ payload.value[i] = llc->llc.value[i];
+ payload.mask[i] = llc->llc.mask[i];
+ }
+ VCAP_KEY_BYTES_SET(MAC_LLC_L2_LLC, payload.value, payload.mask);
+ break;
+ }
+ case OCELOT_ACE_TYPE_SNAP: {
+ struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
+
+ type = IS2_TYPE_SNAP;
+ VCAP_KEY_BYTES_SET(L2_DMAC, snap->dmac.value, snap->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, snap->smac.value, snap->smac.mask);
+ VCAP_KEY_BYTES_SET(MAC_SNAP_L2_SNAP,
+ ace->frame.snap.snap.value,
+ ace->frame.snap.snap.mask);
+ break;
+ }
+ case OCELOT_ACE_TYPE_ARP: {
+ struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
+
+ type = IS2_TYPE_ARP;
+ VCAP_KEY_BYTES_SET(MAC_ARP_L2_SMAC, arp->smac.value,
+ arp->smac.mask);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_ADDR_SPACE_OK, arp->ethernet);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_PROTO_SPACE_OK, arp->ip);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_LEN_OK, arp->length);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_TGT_MATCH, arp->dmac_match);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_SENDER_MATCH, arp->smac_match);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_OPCODE_UNKNOWN, arp->unknown);
+
+ /* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
+ val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
+ (arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
+ msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
+ (arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
+ VCAP_KEY_SET(MAC_ARP_ARP_OPCODE, val, msk);
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr, 4);
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr, 4);
+ VCAP_KEY_ANY_SET(MAC_ARP_DIP_EQ_SIP);
+ break;
+ }
+ case OCELOT_ACE_TYPE_IPV4:
+ case OCELOT_ACE_TYPE_IPV6: {
+ enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp;
+ enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg;
+ enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh;
+ struct ocelot_ace_frame_ipv4 *ipv4 = NULL;
+ struct ocelot_ace_frame_ipv6 *ipv6 = NULL;
+ struct ocelot_vcap_udp_tcp *sport, *dport;
+ struct ocelot_vcap_ipv4 sip, dip;
+ struct ocelot_vcap_u8 proto, ds;
+ struct ocelot_vcap_u48 *ip_data;
+
+ if (ace->type == OCELOT_ACE_TYPE_IPV4) {
+ ipv4 = &ace->frame.ipv4;
+ ttl = ipv4->ttl;
+ fragment = ipv4->fragment;
+ options = ipv4->options;
+ proto = ipv4->proto;
+ ds = ipv4->ds;
+ ip_data = &ipv4->data;
+ sip = ipv4->sip;
+ dip = ipv4->dip;
+ sport = &ipv4->sport;
+ dport = &ipv4->dport;
+ tcp_fin = ipv4->tcp_fin;
+ tcp_syn = ipv4->tcp_syn;
+ tcp_rst = ipv4->tcp_rst;
+ tcp_psh = ipv4->tcp_psh;
+ tcp_ack = ipv4->tcp_ack;
+ tcp_urg = ipv4->tcp_urg;
+ sip_eq_dip = ipv4->sip_eq_dip;
+ sport_eq_dport = ipv4->sport_eq_dport;
+ seq_zero = ipv4->seq_zero;
+ } else {
+ ipv6 = &ace->frame.ipv6;
+ ttl = ipv6->ttl;
+ fragment = OCELOT_VCAP_BIT_ANY;
+ options = OCELOT_VCAP_BIT_ANY;
+ proto = ipv6->proto;
+ ds = ipv6->ds;
+ ip_data = &ipv6->data;
+ for (i = 0; i < 8; i++) {
+ val = ipv6->sip.value[i + 8];
+ msk = ipv6->sip.mask[i + 8];
+ if (i < 4) {
+ dip.value.addr[i] = val;
+ dip.mask.addr[i] = msk;
+ } else {
+ sip.value.addr[i - 4] = val;
+ sip.mask.addr[i - 4] = msk;
+ }
+ }
+ sport = &ipv6->sport;
+ dport = &ipv6->dport;
+ tcp_fin = ipv6->tcp_fin;
+ tcp_syn = ipv6->tcp_syn;
+ tcp_rst = ipv6->tcp_rst;
+ tcp_psh = ipv6->tcp_psh;
+ tcp_ack = ipv6->tcp_ack;
+ tcp_urg = ipv6->tcp_urg;
+ sip_eq_dip = ipv6->sip_eq_dip;
+ sport_eq_dport = ipv6->sport_eq_dport;
+ seq_zero = ipv6->seq_zero;
+ }
+
+ VCAP_KEY_BIT_SET(IP4,
+ ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ VCAP_KEY_BIT_SET(L3_FRAGMENT, fragment);
+ VCAP_KEY_ANY_SET(L3_FRAG_OFS_GT0);
+ VCAP_KEY_BIT_SET(L3_OPTIONS, options);
+ VCAP_KEY_BIT_SET(L3_TTL_GT0, ttl);
+ VCAP_KEY_BYTES_SET(L3_TOS, ds.value, ds.mask);
+ vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_DIP, dip.value.addr,
+ dip.mask.addr, 4);
+ vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_SIP, sip.value.addr,
+ sip.mask.addr, 4);
+ VCAP_KEY_BIT_SET(DIP_EQ_SIP, sip_eq_dip);
+ val = proto.value[0];
+ msk = proto.mask[0];
+ type = IS2_TYPE_IP_UDP_TCP;
+ if (msk == 0xff && (val == 6 || val == 17)) {
+ /* UDP/TCP protocol match */
+ tcp = (val == 6 ?
+ OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_TCP, tcp);
+ vcap_key_l4_port_set(&data,
+ IS2_HKO_IP4_TCP_UDP_L4_DPORT,
+ dport);
+ vcap_key_l4_port_set(&data,
+ IS2_HKO_IP4_TCP_UDP_L4_SPORT,
+ sport);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_RNG);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_SPORT_EQ_DPORT,
+ sport_eq_dport);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_SEQUENCE_EQ0, seq_zero);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_FIN, tcp_fin);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_SYN, tcp_syn);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_RST, tcp_rst);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_PSH, tcp_psh);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_ACK, tcp_ack);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_URG, tcp_urg);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_DOM);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_VER);
+ } else {
+ if (msk == 0) {
+ /* Any IP protocol match */
+ type_mask = IS2_TYPE_MASK_IP_ANY;
+ } else {
+ /* Non-UDP/TCP protocol match */
+ type = IS2_TYPE_IP_OTHER;
+ for (i = 0; i < 6; i++) {
+ payload.value[i] = ip_data->value[i];
+ payload.mask[i] = ip_data->mask[i];
+ }
+ }
+ VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PROTO, proto.value,
+ proto.mask);
+ VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PAYLOAD, payload.value,
+ payload.mask);
+ }
+ break;
+ }
+ case OCELOT_ACE_TYPE_ANY:
+ default:
+ type = 0;
+ type_mask = 0;
+ count = (vcap_is2.entry_width / 2);
+ for (i = (IS2_HKO_PCP + IS2_HKL_PCP); i < count;
+ i += ENTRY_WIDTH) {
+ /* Clear entry data */
+ vcap_key_set(&data, i, min(32u, count - i), 0, 0);
+ }
+ break;
+ }
+
+ VCAP_KEY_SET(TYPE, type, type_mask);
+ is2_action_set(&data, ace->action);
+ vcap_data_set(data.counter, data.counter_offset, vcap_is2.counter_width,
+ ace->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, &data);
+ vcap_action2cache(ocelot, &data);
+ vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void is2_entry_get(struct ocelot_ace_rule *rule, int ix)
+{
+ struct ocelot *op = rule->port->ocelot;
+ struct vcap_data data;
+ int row = (ix / 2);
+ u32 cnt;
+
+ vcap_row_cmd(op, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(op, &data);
+ data.tg_sw = VCAP_TG_HALF;
+ is2_data_get(&data, ix);
+ cnt = vcap_data_get(data.counter, data.counter_offset,
+ vcap_is2.counter_width);
+
+ rule->stats.pkts = cnt;
+}
+
+static void ocelot_ace_rule_add(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ struct list_head *pos, *n;
+
+ block->count++;
+
+ if (list_empty(&block->rules)) {
+ list_add(&rule->list, &block->rules);
+ return;
+ }
+
+ list_for_each_safe(pos, n, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_ace_rule, list);
+ if (rule->prio < tmp->prio)
+ break;
+ }
+ list_add(&rule->list, pos->prev);
+}
+
+static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ int index = -1;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ ++index;
+ if (rule->id == tmp->id)
+ break;
+ }
+ return index;
+}
+
+static struct ocelot_ace_rule*
+ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
+{
+ struct ocelot_ace_rule *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *ace;
+ int i, index;
+
+ /* Add rule to the linked list */
+ ocelot_ace_rule_add(acl_block, rule);
+
+ /* Get the index of the inserted rule */
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+
+ /* Move the rules down to make room for the new rule */
+ for (i = acl_block->count - 1; i > index; i--) {
+ ace = ocelot_ace_rule_get_rule_index(acl_block, i);
+ is2_entry_set(rule->port->ocelot, i, ace);
+ }
+
+ /* Now insert the new rule */
+ is2_entry_set(rule->port->ocelot, index, rule);
+ return 0;
+}
+
+static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_ace_rule, list);
+ if (tmp->id == rule->id) {
+ list_del(pos);
+ kfree(tmp);
+ }
+ }
+
+ block->count--;
+}
+
+int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule del_ace;
+ struct ocelot_ace_rule *ace;
+ int i, index;
+
+ memset(&del_ace, 0, sizeof(del_ace));
+
+ /* Get the index of the rule */
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+
+ /* Delete rule */
+ ocelot_ace_rule_del(acl_block, rule);
+
+ /* Move up all the rules that follow the deleted rule */
+ for (i = index; i < acl_block->count; i++) {
+ ace = ocelot_ace_rule_get_rule_index(acl_block, i);
+ is2_entry_set(rule->port->ocelot, i, ace);
+ }
+
+ /* Now delete the last rule, because it is duplicated */
+ is2_entry_set(rule->port->ocelot, acl_block->count, &del_ace);
+
+ return 0;
+}
+
+int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ int index;
+
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ is2_entry_get(rule, index);
+
+ /* After we get the result we need to clear the counters */
+ tmp = ocelot_ace_rule_get_rule_index(acl_block, index);
+ tmp->stats.pkts = 0;
+ is2_entry_set(rule->port->ocelot, index, tmp);
+
+ return 0;
+}
+
+static struct ocelot_acl_block *ocelot_acl_block_create(struct ocelot *ocelot)
+{
+ struct ocelot_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->rules);
+ block->count = 0;
+ block->ocelot = ocelot;
+
+ return block;
+}
+
+static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
+{
+ kfree(block);
+}
+
+int ocelot_ace_init(struct ocelot *ocelot)
+{
+ struct vcap_data data;
+
+ memset(&data, 0, sizeof(data));
+ vcap_entry2cache(ocelot, &data);
+ ocelot_write(ocelot, vcap_is2.entry_count, S2_CORE_MV_CFG);
+ vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+
+ vcap_action2cache(ocelot, &data);
+ ocelot_write(ocelot, vcap_is2.action_count, S2_CORE_MV_CFG);
+ vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
+ VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+
+ /* Create a policer that discards all frames, including those
+ * headed for the CPU. This policer is used as the action in
+ * ACL rules that drop frames.
+ */
+ ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
+ OCELOT_POLICER_DISCARD);
+
+ acl_block = ocelot_acl_block_create(ocelot);
+
+ return 0;
+}
+
+void ocelot_ace_deinit(void)
+{
+ ocelot_acl_block_destroy(acl_block);
+}
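The heart of the TCAM packing above is vcap_data_set()/vcap_data_get(), which slice an up-to-32-bit value into an array of 32-bit cache words at an arbitrary bit offset. A self-contained round-trip check of the same two helpers, compiled as plain C with fixed-width types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_WIDTH 32

static void vcap_data_set(uint32_t *data, uint32_t offset, uint32_t len,
			  uint32_t value)
{
	uint32_t i, v, m;

	for (i = 0; i < len; i++, offset++) {
		v = data[offset / ENTRY_WIDTH];
		m = 1u << (offset % ENTRY_WIDTH);
		if (value & (1u << i))
			v |= m;
		else
			v &= ~m;
		data[offset / ENTRY_WIDTH] = v;
	}
}

static uint32_t vcap_data_get(uint32_t *data, uint32_t offset, uint32_t len)
{
	uint32_t i, value = 0;

	for (i = 0; i < len; i++, offset++)
		if (data[offset / ENTRY_WIDTH] & (1u << (offset % ENTRY_WIDTH)))
			value |= 1u << i;
	return value;
}

int main(void)
{
	uint32_t words[4] = { 0 };

	/* A 12-bit field at bit 28 spans words[0] and words[1] */
	vcap_data_set(words, 28, 12, 0xabc);
	assert(vcap_data_get(words, 28, 12) == 0xabc);
	printf("words[0]=%08x words[1]=%08x\n",
	       (unsigned)words[0], (unsigned)words[1]);
	return 0;
}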
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
new file mode 100644
index 000000000000..d621683643e1
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ACE_H_
+#define _MSCC_OCELOT_ACE_H_
+
+#include "ocelot.h"
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+
+struct ocelot_ipv4 {
+ u8 addr[4];
+};
+
+enum ocelot_vcap_bit {
+ OCELOT_VCAP_BIT_ANY,
+ OCELOT_VCAP_BIT_0,
+ OCELOT_VCAP_BIT_1
+};
+
+struct ocelot_vcap_u8 {
+ u8 value[1];
+ u8 mask[1];
+};
+
+struct ocelot_vcap_u16 {
+ u8 value[2];
+ u8 mask[2];
+};
+
+struct ocelot_vcap_u24 {
+ u8 value[3];
+ u8 mask[3];
+};
+
+struct ocelot_vcap_u32 {
+ u8 value[4];
+ u8 mask[4];
+};
+
+struct ocelot_vcap_u40 {
+ u8 value[5];
+ u8 mask[5];
+};
+
+struct ocelot_vcap_u48 {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct ocelot_vcap_u64 {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct ocelot_vcap_u128 {
+ u8 value[16];
+ u8 mask[16];
+};
+
+struct ocelot_vcap_vid {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_ipv4 {
+ struct ocelot_ipv4 value;
+ struct ocelot_ipv4 mask;
+};
+
+struct ocelot_vcap_udp_tcp {
+ u16 value;
+ u16 mask;
+};
+
+enum ocelot_ace_type {
+ OCELOT_ACE_TYPE_ANY,
+ OCELOT_ACE_TYPE_ETYPE,
+ OCELOT_ACE_TYPE_LLC,
+ OCELOT_ACE_TYPE_SNAP,
+ OCELOT_ACE_TYPE_ARP,
+ OCELOT_ACE_TYPE_IPV4,
+ OCELOT_ACE_TYPE_IPV6
+};
+
+struct ocelot_ace_vlan {
+ struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+};
+
+struct ocelot_ace_frame_etype {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+ struct ocelot_vcap_u16 etype;
+ struct ocelot_vcap_u16 data; /* MAC data */
+};
+
+struct ocelot_ace_frame_llc {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
+ struct ocelot_vcap_u32 llc;
+};
+
+struct ocelot_ace_frame_snap {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* SNAP header: Organization Code at byte 0, Type at byte 3 */
+ struct ocelot_vcap_u40 snap;
+};
+
+struct ocelot_ace_frame_arp {
+ struct ocelot_vcap_u48 smac;
+ enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
+ enum ocelot_vcap_bit req; /* Opcode request/reply */
+ enum ocelot_vcap_bit unknown; /* Opcode unknown */
+ enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
+ enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
+
+ /* Protocol address length 4, hardware address length 6 */
+ enum ocelot_vcap_bit length;
+
+ enum ocelot_vcap_bit ip; /* Protocol address type IP */
+ enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
+ struct ocelot_vcap_ipv4 sip; /* Sender IP address */
+ struct ocelot_vcap_ipv4 dip; /* Target IP address */
+};
+
+struct ocelot_ace_frame_ipv4 {
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ enum ocelot_vcap_bit fragment; /* Fragment */
+ enum ocelot_vcap_bit options; /* Header options */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u8 proto; /* Protocol */
+ struct ocelot_vcap_ipv4 sip; /* Source IP address */
+ struct ocelot_vcap_ipv4 dip; /* Destination IP address */
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
+ struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+struct ocelot_ace_frame_ipv6 {
+ struct ocelot_vcap_u8 proto; /* IPv6 protocol */
+ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport;
+ struct ocelot_vcap_udp_tcp dport;
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+enum ocelot_ace_action {
+ OCELOT_ACL_ACTION_DROP,
+ OCELOT_ACL_ACTION_TRAP,
+};
+
+struct ocelot_ace_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct ocelot_ace_rule {
+ struct list_head list;
+ struct ocelot_port *port;
+
+ u16 prio;
+ u32 id;
+
+ enum ocelot_ace_action action;
+ struct ocelot_ace_stats stats;
+ int chip_port;
+
+ enum ocelot_vcap_bit dmac_mc;
+ enum ocelot_vcap_bit dmac_bc;
+ struct ocelot_ace_vlan vlan;
+
+ enum ocelot_ace_type type;
+ union {
+ /* OCELOT_ACE_TYPE_ANY: no specific fields */
+ struct ocelot_ace_frame_etype etype;
+ struct ocelot_ace_frame_llc llc;
+ struct ocelot_ace_frame_snap snap;
+ struct ocelot_ace_frame_arp arp;
+ struct ocelot_ace_frame_ipv4 ipv4;
+ struct ocelot_ace_frame_ipv6 ipv6;
+ } frame;
+};
+
+struct ocelot_acl_block {
+ struct list_head rules;
+ struct ocelot *ocelot;
+ int count;
+};
+
+int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
+
+int ocelot_ace_init(struct ocelot *ocelot);
+void ocelot_ace_deinit(void);
+
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+ struct tc_block_offload *f);
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+ struct tc_block_offload *f);
+
+#endif /* _MSCC_OCELOT_ACE_H_ */
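All of the ocelot_vcap_* key types above are value/mask pairs with classic TCAM semantics: a field matches when the frame agrees with the value on every bit the mask sets, and an all-zero mask (what OCELOT_VCAP_BIT_ANY encodes) matches anything. A minimal sketch of that predicate, with the u8 pair simplified to scalars:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcap_u8 { uint8_t value, mask; };	/* scalar stand-in for ocelot_vcap_u8 */

/* Match when the frame agrees with 'value' on every bit set in 'mask' */
static bool vcap_u8_match(const struct vcap_u8 *key, uint8_t frame)
{
	return (frame & key->mask) == (key->value & key->mask);
}

int main(void)
{
	struct vcap_u8 proto_tcp = { .value = 6, .mask = 0xff };
	struct vcap_u8 any = { 0 };	/* the OCELOT_VCAP_BIT_ANY case */

	printf("tcp vs 6:  %d\n", vcap_u8_match(&proto_tcp, 6));
	printf("tcp vs 17: %d\n", vcap_u8_match(&proto_tcp, 17));
	printf("any vs 17: %d\n", vcap_u8_match(&any, 17));
	return 0;
}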
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index e7f90101d2e0..58bde1a9eacb 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -188,6 +188,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
+ { S2, "s2" },
};
if (!np && !pdev->dev.platform_data)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
new file mode 100644
index 000000000000..8778dee5a471
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
+#include "ocelot_ace.h"
+
+struct ocelot_port_block {
+ struct ocelot_acl_block *block;
+ struct ocelot_port *port;
+};
+
+static u16 get_prio(u32 prio)
+{
+ /* prio starts from 0x1000 while the ids start from 0 */
+ return prio >> 16;
+}
+
+static int ocelot_flower_parse_action(struct tc_cls_flower_offload *f,
+ struct ocelot_ace_rule *rule)
+{
+ const struct flow_action_entry *a;
+ int i;
+
+ if (f->rule->action.num_entries != 1)
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_DROP:
+ rule->action = OCELOT_ACL_ACTION_DROP;
+ break;
+ case FLOW_ACTION_TRAP:
+ rule->action = OCELOT_ACL_ACTION_TRAP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int ocelot_flower_parse(struct tc_cls_flower_offload *f,
+ struct ocelot_ace_rule *ocelot_rule)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ u16 proto = ntohs(f->common.protocol);
+
+ /* The hardware supports MAC matches only for the MAC_ETYPE key;
+ * if other matches (port, TCP flags, etc.) are present as well,
+ * just bail out.
+ */
+ if ((dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL))) !=
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL)))
+ return -EOPNOTSUPP;
+
+ if (proto == ETH_P_IP ||
+ proto == ETH_P_IPV6 ||
+ proto == ETH_P_ARP)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ocelot_rule->type = OCELOT_ACE_TYPE_ETYPE;
+ ether_addr_copy(ocelot_rule->frame.etype.dmac.value,
+ match.key->dst);
+ ether_addr_copy(ocelot_rule->frame.etype.smac.value,
+ match.key->src);
+ ether_addr_copy(ocelot_rule->frame.etype.dmac.mask,
+ match.mask->dst);
+ ether_addr_copy(ocelot_rule->frame.etype.smac.mask,
+ match.mask->src);
+ goto finished_key_parsing;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (ntohs(match.key->n_proto) == ETH_P_IP) {
+ ocelot_rule->type = OCELOT_ACE_TYPE_IPV4;
+ ocelot_rule->frame.ipv4.proto.value[0] =
+ match.key->ip_proto;
+ ocelot_rule->frame.ipv4.proto.mask[0] =
+ match.mask->ip_proto;
+ }
+ if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
+ ocelot_rule->type = OCELOT_ACE_TYPE_IPV6;
+ ocelot_rule->frame.ipv6.proto.value[0] =
+ match.key->ip_proto;
+ ocelot_rule->frame.ipv6.proto.mask[0] =
+ match.mask->ip_proto;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
+ ntohs(f->common.protocol) == ETH_P_IP) {
+ struct flow_match_ipv4_addrs match;
+ u8 *tmp;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ tmp = &ocelot_rule->frame.ipv4.sip.value.addr[0];
+ memcpy(tmp, &match.key->src, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.sip.mask.addr[0];
+ memcpy(tmp, &match.mask->src, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.dip.value.addr[0];
+ memcpy(tmp, &match.key->dst, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.dip.mask.addr[0];
+ memcpy(tmp, &match.mask->dst, 4);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
+ ntohs(f->common.protocol) == ETH_P_IPV6) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ ocelot_rule->frame.ipv4.sport.value = ntohs(match.key->src);
+ ocelot_rule->frame.ipv4.sport.mask = ntohs(match.mask->src);
+ ocelot_rule->frame.ipv4.dport.value = ntohs(match.key->dst);
+ ocelot_rule->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ ocelot_rule->type = OCELOT_ACE_TYPE_ANY;
+ ocelot_rule->vlan.vid.value = match.key->vlan_id;
+ ocelot_rule->vlan.vid.mask = match.mask->vlan_id;
+ ocelot_rule->vlan.pcp.value[0] = match.key->vlan_priority;
+ ocelot_rule->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ }
+
+finished_key_parsing:
+ ocelot_rule->prio = get_prio(f->common.prio);
+ ocelot_rule->id = f->cookie;
+ return ocelot_flower_parse_action(f, ocelot_rule);
+}
+
+static
+struct ocelot_ace_rule *ocelot_ace_rule_create(struct tc_cls_flower_offload *f,
+ struct ocelot_port_block *block)
+{
+ struct ocelot_ace_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->port = block->port;
+ rule->chip_port = block->port->chip_port;
+ return rule;
+}
+
+static int ocelot_flower_replace(struct tc_cls_flower_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule *rule;
+ int ret;
+
+ rule = ocelot_ace_rule_create(f, port_block);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = ocelot_flower_parse(f, rule);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ ret = ocelot_ace_rule_offload_add(rule);
+ if (ret)
+ return ret;
+
+ port_block->port->tc.offload_cnt++;
+ return 0;
+}
+
+static int ocelot_flower_destroy(struct tc_cls_flower_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule rule;
+ int ret;
+
+ rule.prio = get_prio(f->common.prio);
+ rule.port = port_block->port;
+ rule.id = f->cookie;
+
+ ret = ocelot_ace_rule_offload_del(&rule);
+ if (ret)
+ return ret;
+
+ port_block->port->tc.offload_cnt--;
+ return 0;
+}
+
+static int ocelot_flower_stats_update(struct tc_cls_flower_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule rule;
+ int ret;
+
+ rule.prio = get_prio(f->common.prio);
+ rule.port = port_block->port;
+ rule.id = f->cookie;
+ ret = ocelot_ace_rule_stats_update(&rule);
+ if (ret)
+ return ret;
+
+ flow_stats_update(&f->stats, 0x0, rule.stats.pkts, 0x0);
+ return 0;
+}
+
+static int ocelot_setup_tc_cls_flower(struct tc_cls_flower_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ switch (f->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return ocelot_flower_replace(f, port_block);
+ case TC_CLSFLOWER_DESTROY:
+ return ocelot_flower_destroy(f, port_block);
+ case TC_CLSFLOWER_STATS:
+ return ocelot_flower_stats_update(f, port_block);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct ocelot_port_block *port_block = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ocelot_setup_tc_cls_flower(type_data, cb_priv);
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ocelot_port_block*
+ocelot_port_block_create(struct ocelot_port *port)
+{
+ struct ocelot_port_block *port_block;
+
+ port_block = kzalloc(sizeof(*port_block), GFP_KERNEL);
+ if (!port_block)
+ return NULL;
+
+ port_block->port = port;
+
+ return port_block;
+}
+
+static void ocelot_port_block_destroy(struct ocelot_port_block *block)
+{
+ kfree(block);
+}
+
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+ struct tc_block_offload *f)
+{
+ struct ocelot_port_block *port_block;
+ struct tcf_block_cb *block_cb;
+ int ret;
+
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ block_cb = tcf_block_cb_lookup(f->block,
+ ocelot_setup_tc_block_cb_flower, port);
+ if (!block_cb) {
+ port_block = ocelot_port_block_create(port);
+ if (!port_block)
+ return -ENOMEM;
+
+ block_cb =
+ __tcf_block_cb_register(f->block,
+ ocelot_setup_tc_block_cb_flower,
+ port, port_block, f->extack);
+ if (IS_ERR(block_cb)) {
+ ret = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ port_block = tcf_block_cb_priv(block_cb);
+ }
+
+ tcf_block_cb_incref(block_cb);
+ return 0;
+
+err_cb_register:
+ ocelot_port_block_destroy(port_block);
+
+ return ret;
+}
+
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+ struct tc_block_offload *f)
+{
+ struct ocelot_port_block *port_block;
+ struct tcf_block_cb *block_cb;
+
+ block_cb = tcf_block_cb_lookup(f->block,
+ ocelot_setup_tc_block_cb_flower, port);
+ if (!block_cb)
+ return;
+
+ port_block = tcf_block_cb_priv(block_cb);
+ if (!tcf_block_cb_decref(block_cb)) {
+ tcf_block_cb_unregister(f->block,
+ ocelot_setup_tc_block_cb_flower, port);
+ ocelot_port_block_destroy(port_block);
+ }
+}
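get_prio() works because the flower core hands drivers the rule priority shifted into the upper 16 bits of common.prio, so the user-visible preference is recovered with a 16-bit right shift. A small demonstration, with example raw values assumed to correspond to tc preferences 1..3:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_prio(uint32_t prio)
{
	/* The core stores the priority in the upper 16 bits */
	return prio >> 16;
}

int main(void)
{
	/* Assumed raw values for user-visible preferences 1..3 */
	uint32_t raw[] = { 0x10000, 0x20000, 0x30000 };

	for (int i = 0; i < 3; i++)
		printf("raw %#x -> prio %u\n",
		       (unsigned)raw[i], (unsigned)get_prio(raw[i]));
	return 0;
}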
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
new file mode 100644
index 000000000000..701e82dd749a
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include "ocelot_police.h"
+
+enum mscc_qos_rate_mode {
+ MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
+ MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */
+ MSCC_QOS_RATE_MODE_DATA, /* Measure data rate in kbps excl. IPG */
+ MSCC_QOS_RATE_MODE_FRAME, /* Measure frame rate in fps */
+ __MSCC_QOS_RATE_MODE_END,
+ NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
+ MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
+};
+
+/* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */
+#define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_DATARATE 1 /* Excl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_FRMRATE_HI 2 /* Unit: 33 1/3 fps, 32.8 frames */
+#define POL_MODE_FRMRATE_LO 3 /* Unit: 1/3 fps, 0.3 frames */
+
+/* Policer indexes */
+#define POL_IX_PORT 0 /* 0-11 : Port policers */
+#define POL_IX_QUEUE 32 /* 32-127 : Queue policers */
+
+/* Default policer order */
+#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
+
+struct qos_policer_conf {
+ enum mscc_qos_rate_mode mode;
+ bool dlb; /* Enable DLB (dual leaky bucket) mode */
+ bool cf; /* Coupling flag (ignored in SLB mode) */
+ u32 cir; /* CIR in kbps/fps (ignored in SLB mode) */
+ u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */
+ u32 pir; /* PIR in kbps/fps */
+ u32 pbs; /* PBS in bytes/frames */
+ u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
+};
+
+static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
+ struct qos_policer_conf *conf)
+{
+ u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
+ u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
+ bool cir_discard = false, pir_discard = false;
+ struct ocelot *ocelot = port->ocelot;
+ u32 pbs_max = 0, cbs_max = 0;
+ u8 ipg = 20;
+ u32 value;
+
+ pir = conf->pir;
+ pbs = conf->pbs;
+
+ switch (conf->mode) {
+ case MSCC_QOS_RATE_MODE_LINE:
+ case MSCC_QOS_RATE_MODE_DATA:
+ if (conf->mode == MSCC_QOS_RATE_MODE_LINE) {
+ frm_mode = POL_MODE_LINERATE;
+ ipg = min_t(u8, GENMASK(4, 0), conf->ipg);
+ } else {
+ frm_mode = POL_MODE_DATARATE;
+ }
+ if (conf->dlb) {
+ cir_ena = 1;
+ cir = conf->cir;
+ cbs = conf->cbs;
+ if (cir == 0 && cbs == 0) {
+ /* Discard CIR frames */
+ cir_discard = 1;
+ } else {
+ cir = DIV_ROUND_UP(cir, 100);
+ cir *= 3; /* 33 1/3 kbps */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ cbs = (cbs ? cbs : 1); /* No zero burst size */
+ cbs_max = 60; /* Limit burst size */
+ cf = conf->cf;
+ if (cf)
+ pir += conf->cir;
+ }
+ }
+ if (pir == 0 && pbs == 0) {
+ /* Discard PIR frames */
+ pir_discard = 1;
+ } else {
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 kbps */
+ pbs = DIV_ROUND_UP(pbs, 4096);
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 60; /* Limit burst size */
+ }
+ break;
+ case MSCC_QOS_RATE_MODE_FRAME:
+ if (pir >= 100) {
+ frm_mode = POL_MODE_FRMRATE_HI;
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 fps */
+ pbs = (pbs * 10) / 328; /* 32.8 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = GENMASK(6, 0); /* Limit burst size */
+ } else {
+ frm_mode = POL_MODE_FRMRATE_LO;
+ if (pir == 0 && pbs == 0) {
+ /* Discard all frames */
+ pir_discard = 1;
+ cir_discard = 1;
+ } else {
+ pir *= 3; /* 1/3 fps */
+ pbs = (pbs * 10) / 3; /* 0.3 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 61; /* Limit burst size */
+ }
+ }
+ break;
+ default: /* MSCC_QOS_RATE_MODE_DISABLED */
+ /* Disable policer using maximum rate and zero burst */
+ pir = GENMASK(15, 0);
+ pbs = 0;
+ break;
+ }
+
+ /* Check limits */
+ if (pir > GENMASK(15, 0)) {
+ netdev_err(port->dev, "Invalid pir\n");
+ return -EINVAL;
+ }
+
+ if (cir > GENMASK(15, 0)) {
+ netdev_err(port->dev, "Invalid cir\n");
+ return -EINVAL;
+ }
+
+ if (pbs > pbs_max) {
+ netdev_err(port->dev, "Invalid pbs\n");
+ return -EINVAL;
+ }
+
+ if (cbs > cbs_max) {
+ netdev_err(port->dev, "Invalid cbs\n");
+ return -EINVAL;
+ }
+
+ value = (ANA_POL_MODE_CFG_IPG_SIZE(ipg) |
+ ANA_POL_MODE_CFG_FRM_MODE(frm_mode) |
+ (cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) |
+ (cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) |
+ ANA_POL_MODE_CFG_OVERSHOOT_ENA);
+
+ ocelot_write_gix(ocelot, value, ANA_POL_MODE_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_PIR_CFG_PIR_RATE(pir) |
+ ANA_POL_PIR_CFG_PIR_BURST(pbs),
+ ANA_POL_PIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (pir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_PIR_STATE, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_CIR_CFG_CIR_RATE(cir) |
+ ANA_POL_CIR_CFG_CIR_BURST(cbs),
+ ANA_POL_CIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (cir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_CIR_STATE, pol_ix);
+
+ return 0;
+}
+
+int ocelot_port_policer_add(struct ocelot_port *port,
+ struct ocelot_policer *pol)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ netdev_dbg(port->dev,
+ "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port->chip_port, pp.pir, pp.pbs);
+
+ err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port->chip_port);
+
+ return 0;
+}
+
+int ocelot_port_policer_del(struct ocelot_port *port)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port);
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+
+ err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port->chip_port);
+
+ return 0;
+}
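The data-rate branch of qos_policer_conf_set() quantizes rates into hardware units of 33 1/3 kbps (hence DIV_ROUND_UP(x, 100) * 3) and bursts into 4096-byte units, rounding a zero burst up to one unit. A worked example of the same arithmetic for a 100 Mbit/s, 64 KiB policer:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t pir_kbps = 100000;	/* 100 Mbit/s requested */
	uint32_t pbs_bytes = 65536;	/* 64 KiB burst requested */

	uint32_t pir = DIV_ROUND_UP(pir_kbps, 100) * 3;	/* 33 1/3 kbps units */
	uint32_t pbs = DIV_ROUND_UP(pbs_bytes, 4096);	/* 4096-byte units */

	if (!pbs)
		pbs = 1;		/* no zero burst size */

	printf("pir=%u units (~%u kbps)\n", (unsigned)pir,
	       (unsigned)(pir * 100 / 3));
	printf("pbs=%u units (%u bytes)\n", (unsigned)pbs,
	       (unsigned)(pbs * 4096));
	return 0;
}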
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
new file mode 100644
index 000000000000..d1137f79efda
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_POLICE_H_
+#define _MSCC_OCELOT_POLICE_H_
+
+#include "ocelot.h"
+
+struct ocelot_policer {
+ u32 rate; /* kilobits per second */
+ u32 burst; /* bytes */
+};
+
+int ocelot_port_policer_add(struct ocelot_port *port,
+ struct ocelot_policer *pol);
+
+int ocelot_port_policer_del(struct ocelot_port *port);
+
+#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 9271af18b93b..6c387f994ec5 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -224,12 +224,23 @@ static const u32 ocelot_sys_regmap[] = {
REG(SYS_PTP_CFG, 0x0006c4),
};
+static const u32 ocelot_s2_regmap[] = {
+ REG(S2_CORE_UPDATE_CTRL, 0x000000),
+ REG(S2_CORE_MV_CFG, 0x000004),
+ REG(S2_CACHE_ENTRY_DAT, 0x000008),
+ REG(S2_CACHE_MASK_DAT, 0x000108),
+ REG(S2_CACHE_ACTION_DAT, 0x000208),
+ REG(S2_CACHE_CNT_DAT, 0x000308),
+ REG(S2_CACHE_TG_DAT, 0x000388),
+};
+
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
+ [S2] = ocelot_s2_regmap,
};
static const struct reg_field ocelot_regfields[] = {
diff --git a/drivers/net/ethernet/mscc/ocelot_s2.h b/drivers/net/ethernet/mscc/ocelot_s2.h
new file mode 100644
index 000000000000..80107bec2e45
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_s2.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_S2_CORE_H_
+#define _OCELOT_S2_CORE_H_
+
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
+#define S2_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define S2_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define S2_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+
+#define S2_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
+#define S2_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
+#define S2_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define S2_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
+#define S2_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
+
+#define S2_CACHE_ENTRY_DAT_RSZ 0x4
+
+#define S2_CACHE_MASK_DAT_RSZ 0x4
+
+#define S2_CACHE_ACTION_DAT_RSZ 0x4
+
+#define S2_CACHE_CNT_DAT_RSZ 0x4
+
+#define S2_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+
+#define S2_BIST_CTRL_TCAM_BIST BIT(1)
+#define S2_BIST_CTRL_TCAM_INIT BIT(0)
+
+#define S2_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
+#define S2_BIST_CFG_TCAM_HCG_DIS BIT(7)
+#define S2_BIST_CFG_TCAM_CG_DIS BIT(6)
+#define S2_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
+#define S2_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
+
+#define S2_BIST_STAT_BIST_RT_ERR BIT(15)
+#define S2_BIST_STAT_BIST_PENC_ERR BIT(14)
+#define S2_BIST_STAT_BIST_COMP_ERR BIT(13)
+#define S2_BIST_STAT_BIST_ADDR_ERR BIT(12)
+#define S2_BIST_STAT_BIST_BL1E_ERR BIT(11)
+#define S2_BIST_STAT_BIST_BL1_ERR BIT(10)
+#define S2_BIST_STAT_BIST_BL0E_ERR BIT(9)
+#define S2_BIST_STAT_BIST_BL0_ERR BIT(8)
+#define S2_BIST_STAT_BIST_PH1_ERR BIT(7)
+#define S2_BIST_STAT_BIST_PH0_ERR BIT(6)
+#define S2_BIST_STAT_BIST_PV1_ERR BIT(5)
+#define S2_BIST_STAT_BIST_PV0_ERR BIT(4)
+#define S2_BIST_STAT_BIST_RUN BIT(3)
+#define S2_BIST_STAT_BIST_ERR BIT(2)
+#define S2_BIST_STAT_BIST_BUSY BIT(1)
+#define S2_BIST_STAT_TCAM_RDY BIT(0)
+
+#endif /* _OCELOT_S2_CORE_H_ */
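Each multi-bit field in this header comes as a set macro, a mask (_M) and an extract macro (_X), so register words can be composed and decomposed without open-coded shifts at the call sites. A round-trip sketch of the UPDATE_ADDR triple, with GENMASK() redefined for userspace:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's 32-bit GENMASK() */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define UPDATE_ADDR(x)   (((x) << 3) & GENMASK(18, 3))
#define UPDATE_ADDR_M    GENMASK(18, 3)
#define UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)

int main(void)
{
	uint32_t reg = 0;

	/* Compose the field, then recover it with the extract macro */
	reg |= UPDATE_ADDR(0x2a);
	assert(UPDATE_ADDR_X(reg) == 0x2a);
	printf("reg=%#x addr=%#x\n", (unsigned)reg,
	       (unsigned)UPDATE_ADDR_X(reg));
	return 0;
}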
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
new file mode 100644
index 000000000000..72084306240d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch TC driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include "ocelot_tc.h"
+#include "ocelot_police.h"
+#include "ocelot_ace.h"
+#include <net/pkt_cls.h>
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot_policer pol = { 0 };
+ struct flow_action_entry *action;
+ int err;
+
+ netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n",
+ __func__, port->chip_port, f->command, f->cookie);
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one action is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rate limit is not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ action = &f->rule->action.entries[0];
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported\n");
+ return -EEXIST;
+ }
+
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = (u32)div_u64(action->police.rate_bytes_ps *
+ PSCHED_NS2TICKS(action->police.burst),
+ PSCHED_TICKS_PER_SEC);
+
+ err = ocelot_port_policer_add(port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ return err;
+ }
+
+ port->tc.police_id = f->cookie;
+ port->tc.offload_cnt++;
+ return 0;
+ case TC_CLSMATCHALL_DESTROY:
+ if (port->tc.police_id != f->cookie)
+ return -ENOENT;
+
+ err = ocelot_port_policer_del(port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer\n");
+ return err;
+ }
+ port->tc.police_id = 0;
+ port->tc.offload_cnt--;
+ return 0;
+ case TC_CLSMATCHALL_STATS: /* fall through */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct ocelot_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ netdev_dbg(port->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
+ ingress ? "ingress" : "egress");
+
+ return ocelot_setup_tc_cls_matchall(port, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return 0;
+ default:
+ netdev_dbg(port->dev, "tc_block_cb: type %d %s\n",
+ type,
+ ingress ? "ingress" : "egress");
+
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, true);
+}
+
+static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, false);
+}
+
+static int ocelot_setup_tc_block(struct ocelot_port *port,
+ struct tc_block_offload *f)
+{
+ tc_setup_cb_t *cb;
+ int ret;
+
+ netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
+ f->command, f->binder_type);
+
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = ocelot_setup_tc_block_cb_ig;
+ port->tc.block_shared = tcf_block_shared(f->block);
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = ocelot_setup_tc_block_cb_eg;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ ret = tcf_block_cb_register(f->block, cb, port,
+ port, f->extack);
+ if (ret)
+ return ret;
+
+ return ocelot_setup_tc_block_flower_bind(port, f);
+ case TC_BLOCK_UNBIND:
+ ocelot_setup_tc_block_flower_unbind(port, f);
+ tcf_block_cb_unregister(f->block, cb, port);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return ocelot_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
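The policer parameters in ocelot_setup_tc_cls_matchall() are derived from the flow_action police entry: bytes per second become a rate in kbit/s, and the PSCHED tick conversions reduce to burst_bytes = rate * burst_time. A plain-arithmetic sketch, assuming the burst arrives as a time in nanoseconds (the PSCHED_* macros are kernel-internal):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes_ps = 12500000;	/* 100 Mbit/s */
	uint64_t burst_ns = 10000000;		/* 10 ms of burst time, assumed */

	/* Bytes/s -> kbit/s, matching div_u64(rate, 1000) * 8 */
	uint32_t rate_kbps = (uint32_t)(rate_bytes_ps / 1000) * 8;

	/* The tick conversions cancel out to rate * time in seconds */
	uint32_t burst_bytes = (uint32_t)(rate_bytes_ps * burst_ns /
					  1000000000ULL);

	printf("rate=%u kbps burst=%u bytes\n",
	       (unsigned)rate_kbps, (unsigned)burst_bytes);
	return 0;
}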
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.h b/drivers/net/ethernet/mscc/ocelot_tc.h
new file mode 100644
index 000000000000..61757c2250a6
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_tc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_TC_H_
+#define _MSCC_OCELOT_TC_H_
+
+#include <linux/netdevice.h>
+
+struct ocelot_port_tc {
+ bool block_shared;
+ unsigned long offload_cnt;
+
+ unsigned long police_id;
+};
+
+int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* _MSCC_OCELOT_TC_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
new file mode 100644
index 000000000000..e22eac1da783
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_VCAP_H_
+#define _OCELOT_VCAP_H_
+
+/* =================================================================
+ * VCAP Common
+ * =================================================================
+ */
+
+/* VCAP Type-Group values */
+#define VCAP_TG_NONE 0 /* Entry is invalid */
+#define VCAP_TG_FULL 1 /* Full entry */
+#define VCAP_TG_HALF 2 /* Half entry */
+#define VCAP_TG_QUARTER 3 /* Quarter entry */
+
+/* =================================================================
+ * VCAP IS2
+ * =================================================================
+ */
+
+#define VCAP_IS2_CNT 64
+#define VCAP_IS2_ENTRY_WIDTH 376
+#define VCAP_IS2_ACTION_WIDTH 99
+#define VCAP_PORT_CNT 11
+
+/* IS2 half key types */
+#define IS2_TYPE_ETYPE 0
+#define IS2_TYPE_LLC 1
+#define IS2_TYPE_SNAP 2
+#define IS2_TYPE_ARP 3
+#define IS2_TYPE_IP_UDP_TCP 4
+#define IS2_TYPE_IP_OTHER 5
+#define IS2_TYPE_IPV6 6
+#define IS2_TYPE_OAM 7
+#define IS2_TYPE_SMAC_SIP6 8
+#define IS2_TYPE_ANY 100 /* Pseudo type */
+
+/* IS2 half key type mask for matching any IP */
+#define IS2_TYPE_MASK_IP_ANY 0xe
+
+/* IS2 action types */
+#define IS2_ACTION_TYPE_NORMAL 0
+#define IS2_ACTION_TYPE_SMAC_SIP 1
+
+/* IS2 MASK_MODE values */
+#define IS2_ACT_MASK_MODE_NONE 0
+#define IS2_ACT_MASK_MODE_FILTER 1
+#define IS2_ACT_MASK_MODE_POLICY 2
+#define IS2_ACT_MASK_MODE_REDIR 3
+
+/* IS2 REW_OP values */
+#define IS2_ACT_REW_OP_NONE 0
+#define IS2_ACT_REW_OP_PTP_ONE 2
+#define IS2_ACT_REW_OP_PTP_TWO 3
+#define IS2_ACT_REW_OP_SPECIAL 8
+#define IS2_ACT_REW_OP_PTP_ORG 9
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
+
+#define VCAP_PORT_WIDTH 4
+
+/* IS2 quarter key - SMAC_SIP4 */
+#define IS2_QKO_IGR_PORT 0
+#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
+#define IS2_QKL_L2_SMAC 48
+#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
+#define IS2_QKL_L3_IP4_SIP 32
+
+/* IS2 half key - common */
+#define IS2_HKO_TYPE 0
+#define IS2_HKL_TYPE 4
+#define IS2_HKO_FIRST (IS2_HKO_TYPE + IS2_HKL_TYPE)
+#define IS2_HKL_FIRST 1
+#define IS2_HKO_PAG (IS2_HKO_FIRST + IS2_HKL_FIRST)
+#define IS2_HKL_PAG 8
+#define IS2_HKO_IGR_PORT_MASK (IS2_HKO_PAG + IS2_HKL_PAG)
+#define IS2_HKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
+#define IS2_HKO_SERVICE_FRM (IS2_HKO_IGR_PORT_MASK + IS2_HKL_IGR_PORT_MASK)
+#define IS2_HKL_SERVICE_FRM 1
+#define IS2_HKO_HOST_MATCH (IS2_HKO_SERVICE_FRM + IS2_HKL_SERVICE_FRM)
+#define IS2_HKL_HOST_MATCH 1
+#define IS2_HKO_L2_MC (IS2_HKO_HOST_MATCH + IS2_HKL_HOST_MATCH)
+#define IS2_HKL_L2_MC 1
+#define IS2_HKO_L2_BC (IS2_HKO_L2_MC + IS2_HKL_L2_MC)
+#define IS2_HKL_L2_BC 1
+#define IS2_HKO_VLAN_TAGGED (IS2_HKO_L2_BC + IS2_HKL_L2_BC)
+#define IS2_HKL_VLAN_TAGGED 1
+#define IS2_HKO_VID (IS2_HKO_VLAN_TAGGED + IS2_HKL_VLAN_TAGGED)
+#define IS2_HKL_VID 12
+#define IS2_HKO_DEI (IS2_HKO_VID + IS2_HKL_VID)
+#define IS2_HKL_DEI 1
+#define IS2_HKO_PCP (IS2_HKO_DEI + IS2_HKL_DEI)
+#define IS2_HKL_PCP 3
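+
+/* Worked example (editor's illustration): chaining the offset/length
+ * pairs above places the common half key fields at these bit positions:
+ *   TYPE bits 0..3, FIRST bit 4, PAG bits 5..12,
+ *   IGR_PORT_MASK bits 13..24 (VCAP_PORT_CNT + 1 = 12 bits),
+ *   SERVICE_FRM bit 25, HOST_MATCH bit 26, L2_MC bit 27, L2_BC bit 28,
+ *   VLAN_TAGGED bit 29, VID bits 30..41, DEI bit 42, PCP bits 43..45,
+ * so IS2_HKO_L2_DMAC below evaluates to bit offset 46.
+ */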
+
+/* IS2 half key - MAC_ETYPE/MAC_LLC/MAC_SNAP/OAM common */
+#define IS2_HKO_L2_DMAC (IS2_HKO_PCP + IS2_HKL_PCP)
+#define IS2_HKL_L2_DMAC 48
+#define IS2_HKO_L2_SMAC (IS2_HKO_L2_DMAC + IS2_HKL_L2_DMAC)
+#define IS2_HKL_L2_SMAC 48
+
+/* IS2 half key - MAC_ETYPE */
+#define IS2_HKO_MAC_ETYPE_ETYPE (IS2_HKO_L2_SMAC + IS2_HKL_L2_SMAC)
+#define IS2_HKL_MAC_ETYPE_ETYPE 16
+#define IS2_HKO_MAC_ETYPE_L2_PAYLOAD \
+ (IS2_HKO_MAC_ETYPE_ETYPE + IS2_HKL_MAC_ETYPE_ETYPE)
+#define IS2_HKL_MAC_ETYPE_L2_PAYLOAD 27
+
+/* IS2 half key - MAC_LLC */
+#define IS2_HKO_MAC_LLC_L2_LLC IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_MAC_LLC_L2_LLC 40
+
+/* IS2 half key - MAC_SNAP */
+#define IS2_HKO_MAC_SNAP_L2_SNAP IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_MAC_SNAP_L2_SNAP 40
+
+/* IS2 half key - ARP */
+#define IS2_HKO_MAC_ARP_L2_SMAC IS2_HKO_L2_DMAC
+#define IS2_HKL_MAC_ARP_L2_SMAC 48
+#define IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK \
+ (IS2_HKO_MAC_ARP_L2_SMAC + IS2_HKL_MAC_ARP_L2_SMAC)
+#define IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK 1
+#define IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK \
+ (IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK + IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK)
+#define IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK 1
+#define IS2_HKO_MAC_ARP_ARP_LEN_OK \
+ (IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK + \
+ IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK)
+#define IS2_HKL_MAC_ARP_ARP_LEN_OK 1
+#define IS2_HKO_MAC_ARP_ARP_TGT_MATCH \
+ (IS2_HKO_MAC_ARP_ARP_LEN_OK + IS2_HKL_MAC_ARP_ARP_LEN_OK)
+#define IS2_HKL_MAC_ARP_ARP_TGT_MATCH 1
+#define IS2_HKO_MAC_ARP_ARP_SENDER_MATCH \
+ (IS2_HKO_MAC_ARP_ARP_TGT_MATCH + IS2_HKL_MAC_ARP_ARP_TGT_MATCH)
+#define IS2_HKL_MAC_ARP_ARP_SENDER_MATCH 1
+#define IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN \
+ (IS2_HKO_MAC_ARP_ARP_SENDER_MATCH + IS2_HKL_MAC_ARP_ARP_SENDER_MATCH)
+#define IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN 1
+#define IS2_HKO_MAC_ARP_ARP_OPCODE \
+ (IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN + \
+ IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN)
+#define IS2_HKL_MAC_ARP_ARP_OPCODE 2
+#define IS2_HKO_MAC_ARP_L3_IP4_DIP \
+ (IS2_HKO_MAC_ARP_ARP_OPCODE + IS2_HKL_MAC_ARP_ARP_OPCODE)
+#define IS2_HKL_MAC_ARP_L3_IP4_DIP 32
+#define IS2_HKO_MAC_ARP_L3_IP4_SIP \
+ (IS2_HKO_MAC_ARP_L3_IP4_DIP + IS2_HKL_MAC_ARP_L3_IP4_DIP)
+#define IS2_HKL_MAC_ARP_L3_IP4_SIP 32
+#define IS2_HKO_MAC_ARP_DIP_EQ_SIP \
+ (IS2_HKO_MAC_ARP_L3_IP4_SIP + IS2_HKL_MAC_ARP_L3_IP4_SIP)
+#define IS2_HKL_MAC_ARP_DIP_EQ_SIP 1
+
+/* IS2 half key - IP4_TCP_UDP/IP4_OTHER common */
+#define IS2_HKO_IP4 IS2_HKO_L2_DMAC
+#define IS2_HKL_IP4 1
+#define IS2_HKO_L3_FRAGMENT (IS2_HKO_IP4 + IS2_HKL_IP4)
+#define IS2_HKL_L3_FRAGMENT 1
+#define IS2_HKO_L3_FRAG_OFS_GT0 (IS2_HKO_L3_FRAGMENT + IS2_HKL_L3_FRAGMENT)
+#define IS2_HKL_L3_FRAG_OFS_GT0 1
+#define IS2_HKO_L3_OPTIONS (IS2_HKO_L3_FRAG_OFS_GT0 + IS2_HKL_L3_FRAG_OFS_GT0)
+#define IS2_HKL_L3_OPTIONS 1
+#define IS2_HKO_L3_TTL_GT0 (IS2_HKO_L3_OPTIONS + IS2_HKL_L3_OPTIONS)
+#define IS2_HKL_L3_TTL_GT0 1
+#define IS2_HKO_L3_TOS (IS2_HKO_L3_TTL_GT0 + IS2_HKL_L3_TTL_GT0)
+#define IS2_HKL_L3_TOS 8
+#define IS2_HKO_L3_IP4_DIP (IS2_HKO_L3_TOS + IS2_HKL_L3_TOS)
+#define IS2_HKL_L3_IP4_DIP 32
+#define IS2_HKO_L3_IP4_SIP (IS2_HKO_L3_IP4_DIP + IS2_HKL_L3_IP4_DIP)
+#define IS2_HKL_L3_IP4_SIP 32
+#define IS2_HKO_DIP_EQ_SIP (IS2_HKO_L3_IP4_SIP + IS2_HKL_L3_IP4_SIP)
+#define IS2_HKL_DIP_EQ_SIP 1
+
+/* IS2 half key - IP4_TCP_UDP */
+#define IS2_HKO_IP4_TCP_UDP_TCP (IS2_HKO_DIP_EQ_SIP + IS2_HKL_DIP_EQ_SIP)
+#define IS2_HKL_IP4_TCP_UDP_TCP 1
+#define IS2_HKO_IP4_TCP_UDP_L4_DPORT \
+ (IS2_HKO_IP4_TCP_UDP_TCP + IS2_HKL_IP4_TCP_UDP_TCP)
+#define IS2_HKL_IP4_TCP_UDP_L4_DPORT 16
+#define IS2_HKO_IP4_TCP_UDP_L4_SPORT \
+ (IS2_HKO_IP4_TCP_UDP_L4_DPORT + IS2_HKL_IP4_TCP_UDP_L4_DPORT)
+#define IS2_HKL_IP4_TCP_UDP_L4_SPORT 16
+#define IS2_HKO_IP4_TCP_UDP_L4_RNG \
+ (IS2_HKO_IP4_TCP_UDP_L4_SPORT + IS2_HKL_IP4_TCP_UDP_L4_SPORT)
+#define IS2_HKL_IP4_TCP_UDP_L4_RNG 8
+#define IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT \
+ (IS2_HKO_IP4_TCP_UDP_L4_RNG + IS2_HKL_IP4_TCP_UDP_L4_RNG)
+#define IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT 1
+#define IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 \
+ (IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT + \
+ IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT)
+#define IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0 1
+#define IS2_HKO_IP4_TCP_UDP_L4_FIN \
+ (IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 + IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0)
+#define IS2_HKL_IP4_TCP_UDP_L4_FIN 1
+#define IS2_HKO_IP4_TCP_UDP_L4_SYN \
+ (IS2_HKO_IP4_TCP_UDP_L4_FIN + IS2_HKL_IP4_TCP_UDP_L4_FIN)
+#define IS2_HKL_IP4_TCP_UDP_L4_SYN 1
+#define IS2_HKO_IP4_TCP_UDP_L4_RST \
+ (IS2_HKO_IP4_TCP_UDP_L4_SYN + IS2_HKL_IP4_TCP_UDP_L4_SYN)
+#define IS2_HKL_IP4_TCP_UDP_L4_RST 1
+#define IS2_HKO_IP4_TCP_UDP_L4_PSH \
+ (IS2_HKO_IP4_TCP_UDP_L4_RST + IS2_HKL_IP4_TCP_UDP_L4_RST)
+#define IS2_HKL_IP4_TCP_UDP_L4_PSH 1
+#define IS2_HKO_IP4_TCP_UDP_L4_ACK \
+ (IS2_HKO_IP4_TCP_UDP_L4_PSH + IS2_HKL_IP4_TCP_UDP_L4_PSH)
+#define IS2_HKL_IP4_TCP_UDP_L4_ACK 1
+#define IS2_HKO_IP4_TCP_UDP_L4_URG \
+ (IS2_HKO_IP4_TCP_UDP_L4_ACK + IS2_HKL_IP4_TCP_UDP_L4_ACK)
+#define IS2_HKL_IP4_TCP_UDP_L4_URG 1
+#define IS2_HKO_IP4_TCP_UDP_L4_1588_DOM \
+ (IS2_HKO_IP4_TCP_UDP_L4_URG + IS2_HKL_IP4_TCP_UDP_L4_URG)
+#define IS2_HKL_IP4_TCP_UDP_L4_1588_DOM 8
+#define IS2_HKO_IP4_TCP_UDP_L4_1588_VER \
+ (IS2_HKO_IP4_TCP_UDP_L4_1588_DOM + IS2_HKL_IP4_TCP_UDP_L4_1588_DOM)
+#define IS2_HKL_IP4_TCP_UDP_L4_1588_VER 4
+
+/* IS2 half key - IP4_OTHER */
+#define IS2_HKO_IP4_OTHER_L3_PROTO IS2_HKO_IP4_TCP_UDP_TCP
+#define IS2_HKL_IP4_OTHER_L3_PROTO 8
+#define IS2_HKO_IP4_OTHER_L3_PAYLOAD \
+ (IS2_HKO_IP4_OTHER_L3_PROTO + IS2_HKL_IP4_OTHER_L3_PROTO)
+#define IS2_HKL_IP4_OTHER_L3_PAYLOAD 56
+
+/* IS2 half key - IP6_STD */
+#define IS2_HKO_IP6_STD_L3_TTL_GT0 IS2_HKO_L2_DMAC
+#define IS2_HKL_IP6_STD_L3_TTL_GT0 1
+#define IS2_HKO_IP6_STD_L3_IP6_SIP \
+ (IS2_HKO_IP6_STD_L3_TTL_GT0 + IS2_HKL_IP6_STD_L3_TTL_GT0)
+#define IS2_HKL_IP6_STD_L3_IP6_SIP 128
+#define IS2_HKO_IP6_STD_L3_PROTO \
+ (IS2_HKO_IP6_STD_L3_IP6_SIP + IS2_HKL_IP6_STD_L3_IP6_SIP)
+#define IS2_HKL_IP6_STD_L3_PROTO 8
+
+/* IS2 half key - OAM */
+#define IS2_HKO_OAM_OAM_MEL_FLAGS IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_OAM_OAM_MEL_FLAGS 7
+#define IS2_HKO_OAM_OAM_VER \
+ (IS2_HKO_OAM_OAM_MEL_FLAGS + IS2_HKL_OAM_OAM_MEL_FLAGS)
+#define IS2_HKL_OAM_OAM_VER 5
+#define IS2_HKO_OAM_OAM_OPCODE (IS2_HKO_OAM_OAM_VER + IS2_HKL_OAM_OAM_VER)
+#define IS2_HKL_OAM_OAM_OPCODE 8
+#define IS2_HKO_OAM_OAM_FLAGS (IS2_HKO_OAM_OAM_OPCODE + IS2_HKL_OAM_OAM_OPCODE)
+#define IS2_HKL_OAM_OAM_FLAGS 8
+#define IS2_HKO_OAM_OAM_MEPID (IS2_HKO_OAM_OAM_FLAGS + IS2_HKL_OAM_OAM_FLAGS)
+#define IS2_HKL_OAM_OAM_MEPID 16
+#define IS2_HKO_OAM_OAM_CCM_CNTS_EQ0 \
+ (IS2_HKO_OAM_OAM_MEPID + IS2_HKL_OAM_OAM_MEPID)
+#define IS2_HKL_OAM_OAM_CCM_CNTS_EQ0 1
+
+/* IS2 half key - SMAC_SIP6 */
+#define IS2_HKO_SMAC_SIP6_IGR_PORT IS2_HKL_TYPE
+#define IS2_HKL_SMAC_SIP6_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_HKO_SMAC_SIP6_L2_SMAC \
+ (IS2_HKO_SMAC_SIP6_IGR_PORT + IS2_HKL_SMAC_SIP6_IGR_PORT)
+#define IS2_HKL_SMAC_SIP6_L2_SMAC 48
+#define IS2_HKO_SMAC_SIP6_L3_IP6_SIP \
+ (IS2_HKO_SMAC_SIP6_L2_SMAC + IS2_HKL_SMAC_SIP6_L2_SMAC)
+#define IS2_HKL_SMAC_SIP6_L3_IP6_SIP 128
+
+/* IS2 full key - common */
+#define IS2_FKO_TYPE 0
+#define IS2_FKL_TYPE 2
+#define IS2_FKO_FIRST (IS2_FKO_TYPE + IS2_FKL_TYPE)
+#define IS2_FKL_FIRST 1
+#define IS2_FKO_PAG (IS2_FKO_FIRST + IS2_FKL_FIRST)
+#define IS2_FKL_PAG 8
+#define IS2_FKO_IGR_PORT_MASK (IS2_FKO_PAG + IS2_FKL_PAG)
+#define IS2_FKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
+#define IS2_FKO_SERVICE_FRM (IS2_FKO_IGR_PORT_MASK + IS2_FKL_IGR_PORT_MASK)
+#define IS2_FKL_SERVICE_FRM 1
+#define IS2_FKO_HOST_MATCH (IS2_FKO_SERVICE_FRM + IS2_FKL_SERVICE_FRM)
+#define IS2_FKL_HOST_MATCH 1
+#define IS2_FKO_L2_MC (IS2_FKO_HOST_MATCH + IS2_FKL_HOST_MATCH)
+#define IS2_FKL_L2_MC 1
+#define IS2_FKO_L2_BC (IS2_FKO_L2_MC + IS2_FKL_L2_MC)
+#define IS2_FKL_L2_BC 1
+#define IS2_FKO_VLAN_TAGGED (IS2_FKO_L2_BC + IS2_FKL_L2_BC)
+#define IS2_FKL_VLAN_TAGGED 1
+#define IS2_FKO_VID (IS2_FKO_VLAN_TAGGED + IS2_FKL_VLAN_TAGGED)
+#define IS2_FKL_VID 12
+#define IS2_FKO_DEI (IS2_FKO_VID + IS2_FKL_VID)
+#define IS2_FKL_DEI 1
+#define IS2_FKO_PCP (IS2_FKO_DEI + IS2_FKL_DEI)
+#define IS2_FKL_PCP 3
+
+/* IS2 full key - IP6_TCP_UDP/IP6_OTHER common */
+#define IS2_FKO_L3_TTL_GT0 (IS2_FKO_PCP + IS2_FKL_PCP)
+#define IS2_FKL_L3_TTL_GT0 1
+#define IS2_FKO_L3_TOS (IS2_FKO_L3_TTL_GT0 + IS2_FKL_L3_TTL_GT0)
+#define IS2_FKL_L3_TOS 8
+#define IS2_FKO_L3_IP6_DIP (IS2_FKO_L3_TOS + IS2_FKL_L3_TOS)
+#define IS2_FKL_L3_IP6_DIP 128
+#define IS2_FKO_L3_IP6_SIP (IS2_FKO_L3_IP6_DIP + IS2_FKL_L3_IP6_DIP)
+#define IS2_FKL_L3_IP6_SIP 128
+#define IS2_FKO_DIP_EQ_SIP (IS2_FKO_L3_IP6_SIP + IS2_FKL_L3_IP6_SIP)
+#define IS2_FKL_DIP_EQ_SIP 1
+
+/* IS2 full key - IP6_TCP_UDP */
+#define IS2_FKO_IP6_TCP_UDP_TCP (IS2_FKO_DIP_EQ_SIP + IS2_FKL_DIP_EQ_SIP)
+#define IS2_FKL_IP6_TCP_UDP_TCP 1
+#define IS2_FKO_IP6_TCP_UDP_L4_DPORT \
+ (IS2_FKO_IP6_TCP_UDP_TCP + IS2_FKL_IP6_TCP_UDP_TCP)
+#define IS2_FKL_IP6_TCP_UDP_L4_DPORT 16
+#define IS2_FKO_IP6_TCP_UDP_L4_SPORT \
+ (IS2_FKO_IP6_TCP_UDP_L4_DPORT + IS2_FKL_IP6_TCP_UDP_L4_DPORT)
+#define IS2_FKL_IP6_TCP_UDP_L4_SPORT 16
+#define IS2_FKO_IP6_TCP_UDP_L4_RNG \
+ (IS2_FKO_IP6_TCP_UDP_L4_SPORT + IS2_FKL_IP6_TCP_UDP_L4_SPORT)
+#define IS2_FKL_IP6_TCP_UDP_L4_RNG 8
+#define IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT \
+ (IS2_FKO_IP6_TCP_UDP_L4_RNG + IS2_FKL_IP6_TCP_UDP_L4_RNG)
+#define IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT 1
+#define IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 \
+ (IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT + \
+ IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT)
+#define IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0 1
+#define IS2_FKO_IP6_TCP_UDP_L4_FIN \
+ (IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 + IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0)
+#define IS2_FKL_IP6_TCP_UDP_L4_FIN 1
+#define IS2_FKO_IP6_TCP_UDP_L4_SYN \
+ (IS2_FKO_IP6_TCP_UDP_L4_FIN + IS2_FKL_IP6_TCP_UDP_L4_FIN)
+#define IS2_FKL_IP6_TCP_UDP_L4_SYN 1
+#define IS2_FKO_IP6_TCP_UDP_L4_RST \
+ (IS2_FKO_IP6_TCP_UDP_L4_SYN + IS2_FKL_IP6_TCP_UDP_L4_SYN)
+#define IS2_FKL_IP6_TCP_UDP_L4_RST 1
+#define IS2_FKO_IP6_TCP_UDP_L4_PSH \
+ (IS2_FKO_IP6_TCP_UDP_L4_RST + IS2_FKL_IP6_TCP_UDP_L4_RST)
+#define IS2_FKL_IP6_TCP_UDP_L4_PSH 1
+#define IS2_FKO_IP6_TCP_UDP_L4_ACK \
+ (IS2_FKO_IP6_TCP_UDP_L4_PSH + IS2_FKL_IP6_TCP_UDP_L4_PSH)
+#define IS2_FKL_IP6_TCP_UDP_L4_ACK 1
+#define IS2_FKO_IP6_TCP_UDP_L4_URG \
+ (IS2_FKO_IP6_TCP_UDP_L4_ACK + IS2_FKL_IP6_TCP_UDP_L4_ACK)
+#define IS2_FKL_IP6_TCP_UDP_L4_URG 1
+#define IS2_FKO_IP6_TCP_UDP_L4_1588_DOM \
+ (IS2_FKO_IP6_TCP_UDP_L4_URG + IS2_FKL_IP6_TCP_UDP_L4_URG)
+#define IS2_FKL_IP6_TCP_UDP_L4_1588_DOM 8
+#define IS2_FKO_IP6_TCP_UDP_L4_1588_VER \
+ (IS2_FKO_IP6_TCP_UDP_L4_1588_DOM + IS2_FKL_IP6_TCP_UDP_L4_1588_DOM)
+#define IS2_FKL_IP6_TCP_UDP_L4_1588_VER 4
+
+/* IS2 full key - IP6_OTHER */
+#define IS2_FKO_IP6_OTHER_L3_PROTO IS2_FKO_IP6_TCP_UDP_TCP
+#define IS2_FKL_IP6_OTHER_L3_PROTO 8
+#define IS2_FKO_IP6_OTHER_L3_PAYLOAD \
+ (IS2_FKO_IP6_OTHER_L3_PROTO + IS2_FKL_IP6_OTHER_L3_PROTO)
+#define IS2_FKL_IP6_OTHER_L3_PAYLOAD 56
+
+/* IS2 full key - CUSTOM */
+#define IS2_FKO_CUSTOM_CUSTOM_TYPE IS2_FKO_L3_TTL_GT0
+#define IS2_FKL_CUSTOM_CUSTOM_TYPE 1
+#define IS2_FKO_CUSTOM_CUSTOM \
+ (IS2_FKO_CUSTOM_CUSTOM_TYPE + IS2_FKL_CUSTOM_CUSTOM_TYPE)
+#define IS2_FKL_CUSTOM_CUSTOM 320
+
+/* IS2 action - BASE_TYPE */
+#define IS2_AO_HIT_ME_ONCE 0
+#define IS2_AL_HIT_ME_ONCE 1
+#define IS2_AO_CPU_COPY_ENA (IS2_AO_HIT_ME_ONCE + IS2_AL_HIT_ME_ONCE)
+#define IS2_AL_CPU_COPY_ENA 1
+#define IS2_AO_CPU_QU_NUM (IS2_AO_CPU_COPY_ENA + IS2_AL_CPU_COPY_ENA)
+#define IS2_AL_CPU_QU_NUM 3
+#define IS2_AO_MASK_MODE (IS2_AO_CPU_QU_NUM + IS2_AL_CPU_QU_NUM)
+#define IS2_AL_MASK_MODE 2
+#define IS2_AO_MIRROR_ENA (IS2_AO_MASK_MODE + IS2_AL_MASK_MODE)
+#define IS2_AL_MIRROR_ENA 1
+#define IS2_AO_LRN_DIS (IS2_AO_MIRROR_ENA + IS2_AL_MIRROR_ENA)
+#define IS2_AL_LRN_DIS 1
+#define IS2_AO_POLICE_ENA (IS2_AO_LRN_DIS + IS2_AL_LRN_DIS)
+#define IS2_AL_POLICE_ENA 1
+#define IS2_AO_POLICE_IDX (IS2_AO_POLICE_ENA + IS2_AL_POLICE_ENA)
+#define IS2_AL_POLICE_IDX 9
+#define IS2_AO_POLICE_VCAP_ONLY (IS2_AO_POLICE_IDX + IS2_AL_POLICE_IDX)
+#define IS2_AL_POLICE_VCAP_ONLY 1
+#define IS2_AO_PORT_MASK (IS2_AO_POLICE_VCAP_ONLY + IS2_AL_POLICE_VCAP_ONLY)
+#define IS2_AL_PORT_MASK VCAP_PORT_CNT
+#define IS2_AO_REW_OP (IS2_AO_PORT_MASK + IS2_AL_PORT_MASK)
+#define IS2_AL_REW_OP 9
+#define IS2_AO_LM_CNT_DIS (IS2_AO_REW_OP + IS2_AL_REW_OP)
+#define IS2_AL_LM_CNT_DIS 1
+#define IS2_AO_ISDX_ENA \
+ (IS2_AO_LM_CNT_DIS + IS2_AL_LM_CNT_DIS + 1) /* Reserved bit */
+#define IS2_AL_ISDX_ENA 1
+#define IS2_AO_ACL_ID (IS2_AO_ISDX_ENA + IS2_AL_ISDX_ENA)
+#define IS2_AL_ACL_ID 6
+
+/* IS2 action - SMAC_SIP */
+#define IS2_AO_SMAC_SIP_CPU_COPY_ENA 0
+#define IS2_AL_SMAC_SIP_CPU_COPY_ENA 1
+#define IS2_AO_SMAC_SIP_CPU_QU_NUM 1
+#define IS2_AL_SMAC_SIP_CPU_QU_NUM 3
+#define IS2_AO_SMAC_SIP_FWD_KILL_ENA 4
+#define IS2_AL_SMAC_SIP_FWD_KILL_ENA 1
+#define IS2_AO_SMAC_SIP_HOST_MATCH 5
+#define IS2_AL_SMAC_SIP_HOST_MATCH 1
+
+#endif /* _OCELOT_VCAP_H_ */
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 4ad5109059e0..bac5be4d4f43 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -20,6 +20,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 87bf784f8e8f..2805641965f3 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -16,6 +16,7 @@ nfp-objs := \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
ccm.o \
+ ccm_mbox.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -34,6 +35,11 @@ nfp-objs := \
nfp_shared_buf.o \
nic/main.o
+ifeq ($(CONFIG_TLS_DEVICE),y)
+nfp-objs += \
+ crypto/tls.o
+endif
+
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index d4bf0e694541..4054b70d7719 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -623,6 +623,13 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
static void
+wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst)
+{
+ if (meta->flags & FLAG_INSN_DO_ZEXT)
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+}
+
+static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
enum nfp_relo_type relo)
{
@@ -858,7 +865,8 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
static int
-data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
+data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset,
+ u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
@@ -881,14 +889,15 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
- swreg lreg, swreg rreg, int size, enum cmd_mode mode)
+data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 dst_gpr, swreg lreg, swreg rreg, int size,
+ enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
@@ -911,33 +920,34 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
- return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
- size, CMD_MODE_32b);
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, reg_a(src_gpr),
+ offset, size, CMD_MODE_32b);
}
static int
-data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
swreg rega, regb;
addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
- return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, rega, regb,
size, CMD_MODE_40b_BA);
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -953,10 +963,12 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
- return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+ return data_ld(nfp_prog, meta, imm_b(nfp_prog), 0, size);
}
-static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+static int
+construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u8 size)
{
swreg tmp_reg;
@@ -967,7 +979,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- return data_ld(nfp_prog, tmp_reg, 0, size);
+ return data_ld(nfp_prog, meta, tmp_reg, 0, size);
}
static int
@@ -1204,7 +1216,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
if (clr_gpr && size < 8)
- wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, gpr);
while (size) {
u32 slice_end;
@@ -1305,9 +1317,10 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
- wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ wrp_alu_imm(nfp_prog, dst, alu_op, insn->imm);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -1319,7 +1332,7 @@ wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2396,12 +2409,14 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst = meta->insn.dst_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt) {
/* Set signedness bit (MSB of result). */
@@ -2410,7 +2425,7 @@ static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
}
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2425,7 +2440,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __ashr_imm(nfp_prog, dst, umin);
+ return __ashr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
/* NOTE: the first insn will set both indirect shift amount (source A)
@@ -2434,7 +2449,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2444,15 +2459,17 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __ashr_imm(nfp_prog, dst, insn->imm);
+ return __ashr_imm(nfp_prog, meta, dst, insn->imm);
}
-static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2461,7 +2478,7 @@ static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shr_imm(nfp_prog, dst, insn->imm);
+ return __shr_imm(nfp_prog, meta, dst, insn->imm);
}
static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2474,22 +2491,24 @@ static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shr_imm(nfp_prog, dst, umin);
+ return __shr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_L_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2498,7 +2517,7 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shl_imm(nfp_prog, dst, insn->imm);
+ return __shl_imm(nfp_prog, meta, dst, insn->imm);
}
static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2511,11 +2530,11 @@ static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shl_imm(nfp_prog, dst, umin);
+ return __shl_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
shl_reg64_lt32_low(nfp_prog, dst, src);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2577,34 +2596,34 @@ static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 1);
}
static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 2);
}
static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 4);
}
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 4);
}
@@ -2682,7 +2701,7 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr32(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2694,7 +2713,7 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr40(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2755,7 +2774,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
if (!len_mid) {
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
@@ -2763,7 +2782,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
if (size <= REG_WIDTH) {
wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 2);
@@ -2794,10 +2813,10 @@ mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
if (size < REG_WIDTH) {
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else if (size == REG_WIDTH) {
wrp_mov(nfp_prog, dst_lo, src_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 1);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index e54d1ac84df2..57d6ff51e980 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -238,6 +238,8 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+/* Instruction needs to zero-extend the high 32 bits of its destination */
+#define FLAG_INSN_DO_ZEXT BIT(6)
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
FLAG_INSN_SKIP_PREC_DEPENDENT | \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 36f56eb4cbe2..e92ee510fd52 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -744,6 +744,17 @@ continue_subprog:
goto continue_subprog;
}
+static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
+ struct bpf_insn_aux_data *aux)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (aux[meta->n].zext_dst)
+ meta->flags |= FLAG_INSN_DO_ZEXT;
+ }
+}
+
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
@@ -784,6 +795,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return -EOPNOTSUPP;
}
+ nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
index 94476e41e261..71afd111bae3 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -7,9 +7,6 @@
#include "nfp_app.h"
#include "nfp_net.h"
-#define NFP_CCM_TYPE_REPLY_BIT 7
-#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
-
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index ac963b128203..01efa779ab31 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -9,6 +9,7 @@
#include <linux/wait.h>
struct nfp_app;
+struct nfp_net;
/* Firmware ABI */
@@ -21,15 +22,27 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ NFP_CCM_TYPE_CRYPTO_RESET = 9,
+ NFP_CCM_TYPE_CRYPTO_ADD = 10,
+ NFP_CCM_TYPE_CRYPTO_DEL = 11,
+ NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
__NFP_CCM_TYPE_MAX,
};
#define NFP_CCM_ABI_VERSION 1
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
struct nfp_ccm_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
+ union {
+ struct {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+ };
+ __be32 raw;
+ };
};
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
@@ -41,15 +54,31 @@ static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
return hdr->type;
}
-static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
- return be16_to_cpu(hdr->tag);
+ return hdr->tag;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ return be16_to_cpu(__nfp_ccm_get_tag(skb));
}
+#define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16)
+#define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0)
+
+enum nfp_ccm_mbox_tlv_type {
+ NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0,
+ NFP_NET_MBOX_TLV_TYPE_END = 1,
+ NFP_NET_MBOX_TLV_TYPE_MSG = 2,
+ NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3,
+ NFP_NET_MBOX_TLV_TYPE_RESV = 4,
+};
+
/* Implementation */
/**
@@ -80,4 +109,13 @@ void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
+
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
+struct sk_buff *
+nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags);
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
new file mode 100644
index 000000000000..e5acd96c3335
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+
+#include "ccm.h"
+#include "nfp_net.h"
+
+/* CCM messages via the mailbox. CMSGs get wrapped into simple TLVs
+ * and copied into the mailbox. Multiple messages can be copied in
+ * to form a batch. Threads come in with a CMSG formed in an skb and
+ * enqueue that skb onto the request queue. If a thread's skb is
+ * first in the queue, that thread handles the mailbox operation: it
+ * copies up to 16 messages into the mailbox (making sure that both
+ * the requests and the replies will fit). After FW is done
+ * processing the batch it copies the data out and wakes the waiting
+ * threads.
+ * A waiting thread either gets its message completed (the response
+ * is copied into the same skb as the request, overwriting it), or
+ * becomes the first in the queue.
+ * Completions and next-to-run are signaled via the control buffer
+ * to limit potential cache line bounces.
+ */
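+
+/* Mailbox layout during a batch, sketched for illustration:
+ *
+ *	tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL ->
+ *	| MSG TLV hdr | request 0 (padded to 4B) |
+ *	| RESV TLV    | room for a bigger reply  |  (only if needed)
+ *	| MSG TLV hdr | request 1 (padded to 4B) |
+ *	| ...                                    |
+ *	| END TLV                                |
+ */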
+
+#define NFP_CCM_MBOX_BATCH_LIMIT 16
+#define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000)
+#define NFP_CCM_MAX_QLEN 256
+
+enum nfp_net_mbox_cmsg_state {
+ NFP_NET_MBOX_CMSG_STATE_QUEUED,
+ NFP_NET_MBOX_CMSG_STATE_NEXT,
+ NFP_NET_MBOX_CMSG_STATE_BUSY,
+ NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
+ NFP_NET_MBOX_CMSG_STATE_DONE,
+};
+
+/**
+ * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
+ * @state: processing state (stage) of the message
+ * @err: error encountered during processing if any
+ * @max_len: max(request_len, reply_len)
+ * @exp_reply: expected reply length (0 means don't validate)
+ */
+struct nfp_ccm_mbox_cmsg_cb {
+ enum nfp_net_mbox_cmsg_state state;
+ int err;
+ unsigned int max_len;
+ unsigned int exp_reply;
+};
+
+static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
+{
+ return round_down(nn->tlv_caps.mbox_len, 4) -
+ NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
+ 4 * 2; /* Msg TLV plus End TLV headers */
+}
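+
+/* Example (editor's illustration): with a hypothetical 512 byte mailbox
+ * and an 8 byte common command header, a single message and its reply
+ * may use up to 512 - 8 - 8 = 496 bytes, the second 8 covering the
+ * 4 byte Msg and End TLV headers.
+ */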
+
+static void
+nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
+ cb->err = 0;
+ cb->max_len = max_len;
+ cb->exp_reply = exp_reply;
+}
+
+static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->max_len;
+}
+
+static bool nfp_ccm_mbox_done(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
+}
+
+static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
+ cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
+}
+
+static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
+{
+ return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
+}
+
+static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (!skb)
+ return;
+
+ cb = (void *)skb->cb;
+ cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void
+nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
+{
+ nn_writel(nn, off,
+ FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
+ FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
+}
+
+static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+ int reserve, i, cnt;
+ __be32 *data;
+ u32 off, len;
+
+ off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
+ skb->len);
+ off += 4;
+
+ /* Write data word by word, skb->data should be aligned */
+ data = (__be32 *)skb->data;
+ cnt = skb->len / 4;
+ for (i = 0; i < cnt; i++) {
+ nn_writel(nn, off, be32_to_cpu(data[i]));
+ off += 4;
+ }
+ if (skb->len & 3) {
+ __be32 tmp = 0;
+
+ memcpy(&tmp, &data[i], skb->len & 3);
+ nn_writel(nn, off, be32_to_cpu(tmp));
+ off += 4;
+ }
+
+ /* Reserve space if reply is bigger */
+ len = round_up(skb->len, 4);
+ reserve = nfp_ccm_mbox_maxlen(skb) - len;
+ if (reserve > 0) {
+ nfp_ccm_mbox_write_tlv(nn, off,
+ NFP_NET_MBOX_TLV_TYPE_RESV,
+ reserve);
+ off += 4 + reserve;
+ }
+
+ if (skb == last)
+ break;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
+}
+
+static struct sk_buff *
+nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ if (__nfp_ccm_get_tag(skb) == tag)
+ return skb;
+
+ if (skb == last)
+ return NULL;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+}
+
+static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ u8 __iomem *data, *end;
+ struct sk_buff *skb;
+
+ data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ end = data + nn->tlv_caps.mbox_len;
+
+ while (true) {
+ unsigned int length, offset, type;
+ struct nfp_ccm_hdr hdr;
+ __be32 *skb_data;
+ u32 tlv_hdr;
+ int i, cnt;
+
+ tlv_hdr = readl(data);
+ type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
+ length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
+ offset = data - nn->dp.ctrl_bar;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (data + length > end) {
+ nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_END)
+ break;
+ if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
+ goto next_tlv;
+ if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
+ type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (length < 4) {
+ nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ hdr.raw = cpu_to_be32(readl(data));
+
+ skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
+ if (!skb) {
+ nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
+ be16_to_cpu(hdr.tag));
+ break;
+ }
+ cb = (void *)skb->cb;
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp,
+ "mailbox msg not supported type:%d\n",
+ nfp_ccm_get_type(skb));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
+ hdr.type,
+ __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (cb->exp_reply && length != cb->exp_reply) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
+ hdr.type, cb->exp_reply, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (length > cb->max_len) {
+ nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
+ hdr.type, cb->max_len, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (length <= skb->len)
+ __skb_trim(skb, length);
+ else
+ skb_put(skb, length - skb->len);
+
+ /* We overcopy here slightly, but that's okay: the skb is large
+ * enough, and anything beyond skb->len is ignored.
+ */
+ skb_data = (__be32 *)skb->data;
+ memcpy(skb_data, &hdr, 4);
+
+ cnt = DIV_ROUND_UP(length, 4);
+ for (i = 1; i < cnt; i++)
+ skb_data[i] = cpu_to_be32(readl(data + i * 4));
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
+next_tlv:
+ data += round_up(length, 4);
+ if (data + 4 > end) {
+ nn_dp_warn(&nn->dp,
+ "reached end of MBOX without END TLV\n");
+ break;
+ }
+ }
+
+ smp_wmb(); /* order the skb->data vs. cb->state */
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
+ cb->err = -ENOENT;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ }
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void
+nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ cb->err = err;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct sk_buff *skb, *last;
+ int cnt, err;
+
+ space -= 4; /* for End TLV */
+
+ /* First skb must fit, because it's ours and we checked it fits */
+ cnt = 1;
+ last = skb = __skb_peek(&nn->mbox_cmsg.queue);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+
+ while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+ if (space < 0)
+ break;
+ last = skb;
+ nfp_ccm_mbox_set_busy(skb);
+ cnt++;
+ if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
+ break;
+ }
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ /* Now we own all skbs marked in progress; new requests may arrive
+ * at the end of the queue.
+ */
+
+ nn_ctrl_bar_lock(nn);
+
+ nfp_ccm_mbox_copy_in(nn, last);
+
+ err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, last);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, last, -EIO);
+
+ nn_ctrl_bar_unlock(nn);
+
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ if (cb->err)
+ dev_kfree_skb_any(skb);
+ return cb->err;
+}
+
+/* If the wait timed out but the command is already in progress we have
+ * to wait until it finishes. Runners have ownership of the skbs marked
+ * as busy.
+ */
+static int
+nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ bool was_first;
+
+ if (nfp_ccm_mbox_in_progress(skb)) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
+ smp_rmb(); /* pairs with smp_wmb() after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ was_first = nfp_ccm_mbox_should_run(nn, skb);
+ __skb_unlink(skb, &nn->mbox_cmsg.queue);
+ if (was_first)
+ nfp_ccm_mbox_mark_next_runner(nn);
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (was_first)
+ wake_up_all(&nn->mbox_cmsg.wq);
+
+ nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
+ type);
+ return -ETIMEDOUT;
+}
+
+static int
+nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size, unsigned int max_reply_size,
+ gfp_t flags)
+{
+ const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
+ unsigned int max_len;
+ ssize_t undersize;
+ int err;
+
+ if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
+ nn_dp_warn(&nn->dp,
+ "message type %d not supported by mailbox\n", type);
+ return -EINVAL;
+ }
+
+ /* If the reply size is unknown assume it will take the entire
+ * mailbox; the callers should do their best to make sure this
+ * never happens.
+ */
+ if (!max_reply_size)
+ max_reply_size = mbox_max;
+ max_reply_size = round_up(max_reply_size, 4);
+
+ /* Make sure we can fit the entire reply into the skb,
+ * and that we don't have to slow down the mbox handler
+ * with allocations.
+ */
+ undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
+ if (undersize > 0) {
+ err = pskb_expand_head(skb, 0, undersize, flags);
+ if (err) {
+ nn_dp_warn(&nn->dp,
+ "can't allocate reply buffer for mailbox\n");
+ return err;
+ }
+ }
+
+ /* Make sure that request and response both fit into the mailbox */
+ max_len = max(max_reply_size, round_up(skb->len, 4));
+ if (max_len > mbox_max) {
+ nn_dp_warn(&nn->dp,
+ "message too big for tha mailbox: %u/%u vs %u\n",
+ skb->len, max_reply_size, mbox_max);
+ return -EMSGSIZE;
+ }
+
+ nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
+
+ return 0;
+}
+
+static int
+nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ assert_spin_locked(&nn->mbox_cmsg.queue.lock);
+
+ if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+ nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
+ return -EBUSY;
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);
+
+ __skb_queue_tail(&nn->mbox_cmsg.queue, skb);
+
+ return 0;
+}
+
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
+ max_reply_size, GFP_KERNEL);
+ if (err)
+ goto err_free_skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
+ if (err)
+ goto err_unlock;
+
+ /* First in queue takes the mailbox lock and processes the batch */
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ bool to;
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ to = !wait_event_timeout(nn->mbox_cmsg.wq,
+ nfp_ccm_mbox_done(skb) ||
+ nfp_ccm_mbox_should_run(nn, skb),
+ msecs_to_jiffies(NFP_CCM_TIMEOUT));
+
+ /* fast path for those completed by another thread */
+ if (nfp_ccm_mbox_done(skb)) {
+ smp_rmb(); /* pairs with wmb after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ WARN_ON(!to);
+
+ err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
+ if (err)
+ goto err_free_skb;
+ return 0;
+ }
+ }
+
+ /* run queue expects the lock held */
+ nfp_ccm_mbox_run_queue_unlock(nn);
+ return nfp_ccm_mbox_skb_return(skb);
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+struct sk_buff *
+nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags)
+{
+ unsigned int max_size;
+ struct sk_buff *skb;
+
+ if (!reply_size)
+ max_size = nfp_ccm_mbox_max_msg(nn);
+ else
+ max_size = max(req_size, reply_size);
+ max_size = round_up(max_size, 4);
+
+ skb = alloc_skb(max_size, flags);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, req_size);
+
+ return skb;
+}
+
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
+{
+ return nfp_ccm_mbox_max_msg(nn) >= size;
+}
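A typical caller flow for the new mailbox primitives, sketched from the API above (struct my_req and my_reply are placeholders; on failure nfp_ccm_mbox_communicate() frees the skb itself):

	skb = nfp_ccm_mbox_alloc(nn, sizeof(struct my_req),
				 sizeof(struct my_reply), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	/* fill the request at skb->data ... */
	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(struct my_reply),
				       sizeof(struct my_reply));
	if (err)
		return err;
	/* the reply now overwrites the request at skb->data */
	dev_consume_skb_any(skb);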
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
new file mode 100644
index 000000000000..1f97fb443134
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_H
+#define NFP_CRYPTO_H 1
+
+struct nfp_net_tls_offload_ctx {
+ __be32 fw_handle[2];
+
+ u32 next_seq;
+ bool out_of_sync;
+};
+
+#ifdef CONFIG_TLS_DEVICE
+int nfp_net_tls_init(struct nfp_net *nn);
+#else
+static inline int nfp_net_tls_init(struct nfp_net *nn)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
new file mode 100644
index 000000000000..192ba907d91b
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_FW_H
+#define NFP_CRYPTO_FW_H 1
+
+#include "../ccm.h"
+
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+
+struct nfp_crypto_reply_simple {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+};
+
+struct nfp_crypto_req_reset {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+};
+
+#define NFP_NET_TLS_IPVER GENMASK(15, 12)
+#define NFP_NET_TLS_VLAN GENMASK(11, 0)
+#define NFP_NET_TLS_VLAN_UNUSED 4095
+
+struct nfp_crypto_req_add_front {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ u8 key_len;
+ __be16 ipver_vlan __packed;
+ u8 l4_proto;
+};
+
+struct nfp_crypto_req_add_back {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 key[8];
+ __be32 salt;
+ __be32 iv[2];
+ __be32 counter;
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+
+struct nfp_crypto_req_add_v4 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip;
+ __be32 dst_ip;
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_req_add_v6 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_reply_add {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_del {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_update {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ __be32 handle[2];
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
new file mode 100644
index 000000000000..c638223e9f60
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <net/tls.h>
+
+#include "../ccm.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+#include "fw.h"
+
+#define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
+ (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
+
+#define NFP_NET_TLS_OPCODE_MASK_RX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
+
+#define NFP_NET_TLS_OPCODE_MASK_TX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
+
+#define NFP_NET_TLS_OPCODE_MASK \
+ (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
+
+static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
+{
+ u32 off, val;
+
+ off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
+
+ val = nn_readl(nn, off);
+ if (on)
+ val |= BIT(opcode & 31);
+ else
+ val &= ~BIT(opcode & 31);
+ nn_writel(nn, off, val);
+}
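+
+/* Example (editor's illustration): opcode 1, the ..._AES_GCM_128_DEC
+ * opcode, selects the first 32-bit word of the enable map
+ * (round_down(1 / 8, 4) == 0) and flips bit 1 in it (BIT(1 & 31)).
+ */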
+
+static bool
+__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 opcode;
+ int cnt;
+
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ nn->ktls_tx_conn_cnt += add;
+ cnt = nn->ktls_tx_conn_cnt;
+ nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
+
+ /* Care only about 0 -> 1 and 1 -> 0 transitions */
+ if (cnt > 1)
+ return false;
+
+ nfp_net_crypto_set_op(nn, opcode, cnt);
+ return true;
+}
+
+static int
+nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ int ret = 0;
+
+ /* Use the BAR lock to protect the connection counts */
+ nn_ctrl_bar_lock(nn);
+ if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ /* Undo the cnt adjustment if failed */
+ if (ret)
+ __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
+ }
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+static int
+nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
+}
+
+static int
+nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
+}
+
+static struct sk_buff *
+nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
+{
+ return nfp_ccm_mbox_alloc(nn, req_sz,
+ sizeof(struct nfp_crypto_reply_simple),
+ flags);
+}
+
+static int
+nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
+ const char *name, enum nfp_ccm_type type)
+{
+ struct nfp_crypto_reply_simple *reply;
+ int err;
+
+ err = nfp_ccm_mbox_communicate(nn, skb, type,
+ sizeof(*reply), sizeof(*reply));
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
+ return err;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err)
+ nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
+ name, err);
+ dev_consume_skb_any(skb);
+
+ return err;
+}
+
+static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
+{
+ struct nfp_crypto_req_del *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ memcpy(req->handle, fw_handle, sizeof(req->handle));
+
+ nfp_net_tls_communicate_simple(nn, skb, "delete",
+ NFP_CCM_TYPE_CRYPTO_DEL);
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ req->front.key_len += sizeof(__be32) * 2;
+ req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ req->src_ip = inet->inet_saddr;
+ req->dst_ip = inet->inet_daddr;
+ } else {
+ req->src_ip = inet->inet_daddr;
+ req->dst_ip = inet->inet_saddr;
+ }
+
+ return &req->back;
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
+ int direction)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ req->front.key_len += sizeof(struct in6_addr) * 2;
+ req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
+ } else {
+ memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
+ }
+
+#endif
+ return &req->back;
+}
+
+static void
+nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
+ struct nfp_crypto_req_add_back *back, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ front->l4_proto = IPPROTO_TCP;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ back->src_port = inet->inet_sport;
+ back->dst_port = inet->inet_dport;
+ } else {
+ back->src_port = inet->inet_dport;
+ back->dst_port = inet->inet_sport;
+ }
+}
+
+static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
+{
+ switch (direction) {
+ case TLS_OFFLOAD_CTX_DIR_TX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ case TLS_OFFLOAD_CTX_DIR_RX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static bool
+nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 bit;
+
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ else
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return nn->tlv_caps.crypto_ops & BIT(bit);
+}
+
+static int
+nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *tls_ci;
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_crypto_req_add_front *front;
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_add_back *back;
+ struct nfp_crypto_reply_add *reply;
+ struct sk_buff *skb;
+ size_t req_sz;
+ bool ipv6;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
+ TLS_DRIVER_STATE_SIZE_TX);
+
+ if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
+ return -EOPNOTSUPP;
+
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ req_sz = sizeof(struct nfp_crypto_req_add_v6);
+ ipv6 = true;
+ break;
+ }
+#endif
+ /* fall through */
+ case AF_INET:
+ req_sz = sizeof(struct nfp_crypto_req_add_v4);
+ ipv6 = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_net_tls_conn_add(nn, direction);
+ if (err)
+ return err;
+
+ skb = nfp_ccm_mbox_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_conn_remove;
+ }
+
+ front = (void *)skb->data;
+ front->ep_id = 0;
+ front->key_len = 8;
+ front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(front->resv, 0, sizeof(front->resv));
+
+ if (ipv6)
+ back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
+ else
+ back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);
+
+ nfp_net_tls_set_l4(front, back, sk, direction);
+
+ back->counter = 0;
+ back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
+
+ tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
+ sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
+
+ err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
+ sizeof(*reply), sizeof(*reply));
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
+ /* communicate frees skb on error */
+ goto err_conn_remove;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err) {
+ if (err == -ENOSPC) {
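+			/* Log only the first table-full event; later ones
+			 * just bump the counter exported via ethtool.
+			 */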
+ if (!atomic_fetch_inc(&nn->ktls_no_space))
+ nn_info(nn, "HW TLS table full\n");
+ } else {
+ nn_dp_warn(&nn->dp,
+ "failed to add TLS, FW replied: %d\n", err);
+ }
+ goto err_free_skb;
+ }
+
+ if (!reply->handle[0] && !reply->handle[1]) {
+ nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
+ goto err_fw_remove;
+ }
+
+ ntls = tls_driver_ctx(sk, direction);
+ memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
+ ntls->next_seq = start_offload_tcp_sn;
+ dev_consume_skb_any(skb);
+
+ return 0;
+
+err_fw_remove:
+ nfp_net_tls_del_fw(nn, reply->handle);
+err_free_skb:
+ dev_consume_skb_any(skb);
+err_conn_remove:
+ nfp_net_tls_conn_remove(nn, direction);
+ return err;
+}
+
+static void
+nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+
+ nfp_net_tls_conn_remove(nn, direction);
+
+ ntls = __tls_driver_ctx(tls_ctx, direction);
+ nfp_net_tls_del_fw(nn, ntls->fw_handle);
+}
+
+static const struct tlsdev_ops nfp_net_tls_ops = {
+ .tls_dev_add = nfp_net_tls_add,
+ .tls_dev_del = nfp_net_tls_del,
+};
+
+static int nfp_net_tls_reset(struct nfp_net *nn)
+{
+ struct nfp_crypto_req_reset *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+
+ return nfp_net_tls_communicate_simple(nn, skb, "reset",
+ NFP_CCM_TYPE_CRYPTO_RESET);
+}
+
+int nfp_net_tls_init(struct nfp_net *nn)
+{
+ struct net_device *netdev = nn->dp.netdev;
+ int err;
+
+ if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
+ return 0;
+
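+	/* All TLS control message types must be supported by the FW
+	 * mailbox handler before offload is enabled.
+	 */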
+ if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
+ NFP_NET_TLS_CCM_MBOX_OPS_MASK)
+ return 0;
+
+ if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
+ nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
+ nn->tlv_caps.mbox_len);
+ return 0;
+ }
+
+ err = nfp_net_tls_reset(nn);
+ if (err)
+ return err;
+
+ nn_ctrl_bar_lock(nn);
+ nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
+ err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ nn_ctrl_bar_unlock(nn);
+ if (err)
+ return err;
+
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ }
+
+ netdev->tlsdev_ops = &nfp_net_tls_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8c67505865a4..a7a80f4b722a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -162,8 +162,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
pay_len = nfp_flower_cmsg_get_data_len(skb);
- if (pay_len != sizeof(struct nfp_tun_active_tuns) +
- sizeof(struct route_ip_info) * count) {
+ if (pay_len != struct_size(payload, tun_info, count)) {
nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
return;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df9aff2684ed..661fa5941b91 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -12,11 +12,13 @@
#ifndef _NFP_NET_H_
#define _NFP_NET_H_
+#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/semaphore.h>
#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -372,6 +374,11 @@ struct nfp_net_rx_ring {
* @hw_csum_tx_inner: Counter of inner TX checksum offload requests
* @tx_gather: Counter of packets with Gather DMA
* @tx_lso: Counter of LSO packets sent
+ * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
+ * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
+ * by the fallback path because packets came out of order
+ * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
+ * path could not encrypt them
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
@@ -409,21 +416,28 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_complete;
+ u64 hw_csum_rx_error;
+ u64 rx_replace_buf_alloc_fail;
+
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
- u64 hw_csum_tx;
+
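+	/* TX stats start on a fresh cacheline to avoid false sharing
+	 * with the RX counters above.
+	 */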
+ u64 ____cacheline_aligned_in_smp hw_csum_tx;
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+ u64 hw_tls_tx;
- u64 hw_csum_rx_error;
- u64 rx_replace_buf_alloc_fail;
+ u64 tls_tx_fallback;
+ u64 tls_tx_no_fallback;
u64 tx_errors;
u64 tx_busy;
+ /* Cold data follows */
+
u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
@@ -458,6 +472,7 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
 * @chained_metadata_format: Firmware will use new metadata format
+ * @ktls_tx: Is kTLS TX enabled?
* @rx_dma_dir: Mapping direction for RX buffers
 * @rx_dma_off: Offset at which packet data is DMA-mapped (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -482,6 +497,7 @@ struct nfp_net_dp {
u8 is_vf:1;
u8 chained_metadata_format:1;
+ u8 ktls_tx:1;
u8 rx_dma_dir;
u8 rx_offset;
@@ -549,7 +565,7 @@ struct nfp_net_dp {
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
* @bar_lock: vNIC config BAR access lock, protects: update,
- * mailbox area
+ * mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -562,6 +578,13 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @tlv_caps: Parsed TLV capabilities
+ * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
+ * @ktls_no_space: Counter of firmware rejecting kTLS connection due to
+ * lack of space
+ * @mbox_cmsg: Common Control Message via vNIC mailbox state
+ * @mbox_cmsg.queue: CCM mbox queue of pending messages
+ * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
+ * @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -620,7 +643,7 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
- struct mutex bar_lock;
+ struct semaphore bar_lock;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -637,6 +660,16 @@ struct nfp_net {
struct nfp_net_tlv_caps tlv_caps;
+ unsigned int ktls_tx_conn_cnt;
+
+ atomic_t ktls_no_space;
+
+ struct {
+ struct sk_buff_head queue;
+ wait_queue_head_t wq;
+ u16 tag;
+ } mbox_cmsg;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -848,12 +881,12 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
- mutex_lock(&nn->bar_lock);
+ down(&nn->bar_lock);
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
- mutex_unlock(&nn->bar_lock);
+ up(&nn->bar_lock);
}
/* Globals */
@@ -883,6 +916,7 @@ void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b82b684f52ce..e221847d9a3e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -37,6 +36,7 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
+#include <net/tls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
@@ -45,6 +45,7 @@
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+#include "crypto/crypto.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -271,12 +272,10 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
- lockdep_assert_held(&nn->bar_lock);
-
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -331,7 +330,6 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
@@ -804,6 +802,72 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
u64_stats_update_end(&r_vec->tx_sync);
}
+#ifdef CONFIG_TLS_DEVICE
+static struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
+{
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct sk_buff *nskb;
+ u32 datalen, seq;
+
+ if (likely(!dp->ktls_tx))
+ return skb;
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ return skb;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
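+	/* Stream diverged from the FW's record state - fall back to
+	 * software crypto.
+	 */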
+ if (unlikely(ntls->next_seq != seq || ntls->out_of_sync)) {
+		/* Pure ACKs carry no data - send as-is even when out of sync */
+ if (!datalen)
+ return skb;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ nskb = tls_encrypt_skb(skb);
+ if (!nskb) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_no_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NULL;
+ }
+ /* encryption wasn't necessary */
+ if (nskb == skb)
+ return skb;
+ /* we don't re-check ring space */
+ if (unlikely(skb_is_nonlinear(nskb))) {
+ nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(nskb);
+ return NULL;
+ }
+
+ /* jump forward, a TX may have gotten lost, need to sync TX */
+ if (!ntls->out_of_sync && seq - ntls->next_seq < U32_MAX / 4)
+ ntls->out_of_sync = true;
+
+ *nr_frags = 0;
+ return nskb;
+ }
+
+ if (datalen) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->hw_tls_tx++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ ntls->next_seq += datalen;
+ return skb;
+}
+#endif
+
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
@@ -811,24 +875,47 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
-static int nfp_net_prep_port_id(struct sk_buff *skb)
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ if (likely(!md_dst && !tls_handle))
return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
- if (unlikely(skb_cow_head(skb, 8)))
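+	/* 4B metadata type word, plus 4B port id and/or 8B TLS conn
+	 * handle if present.
+	 */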
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- data = skb_push(skb, 8);
- put_unaligned_be32(NFP_NET_META_PORTID, data);
- put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
- return 8;
+ return md_bytes;
}
/**
@@ -851,6 +938,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
+ u64 tls_handle = 0;
u16 qidx;
dp = &nn->dp;
@@ -872,18 +960,23 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- md_bytes = nfp_net_prep_port_id(skb);
- if (unlikely(md_bytes < 0)) {
+#ifdef CONFIG_TLS_DEVICE
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
nfp_net_tx_xmit_more_flush(tx_ring);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+#endif
+
+ md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
- goto err_free;
+ goto err_dma_err;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
@@ -979,8 +1072,9 @@ err_unmap:
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
-err_free:
+err_dma_err:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
@@ -3704,13 +3798,16 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
- mutex_init(&nn->bar_lock);
+ sema_init(&nn->bar_lock, 1);
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
+ skb_queue_head_init(&nn->mbox_cmsg.queue);
+ init_waitqueue_head(&nn->mbox_cmsg.wq);
+
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
@@ -3733,8 +3830,7 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
-
- mutex_destroy(&nn->bar_lock);
+ WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -4009,9 +4105,14 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
- if (nn->dp.netdev)
+ if (nn->dp.netdev) {
nfp_net_netdev_init(nn);
+ err = nfp_net_tls_init(nn);
+ if (err)
+ return err;
+ }
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 6d5213b5bcb0..d835c14b7257 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -99,6 +99,21 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->repr_cap = readl(data);
break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ if (length >= 4)
+ caps->mbox_cmsg_types = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 25919e338071..ee6b24e4eacd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -44,6 +44,7 @@
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
+#define NFP_NET_META_CONN_HANDLE 7
#define NFP_META_PORT_ID_CTRL ~0U
@@ -135,6 +136,7 @@
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
+#define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */
 #define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -394,6 +396,7 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
/**
* VLAN filtering using general use mailbox
@@ -466,6 +469,16 @@
* %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
* Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
* can be used on representors.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ * Variable, bitmap of control message types supported by the mailbox handler.
+ * Bit 0 corresponds to message type 0, bit 1 to 1, etc. Control messages are
+ * encapsulated into simple TLVs, terminated by an end TLV, and written to the
+ * Mailbox.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ * 8 words, bitmaps of supported and enabled crypto operations.
+ * The first 16B (4 words) contain a bitmap of supported crypto operations,
+ * and the next 16B contain the enabled operations.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -475,6 +488,8 @@
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
+#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
struct device;
@@ -484,12 +499,18 @@ struct device;
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
* @repr_cap: capabilities for representors
+ * @mbox_cmsg_types: cmsgs which can be passed through the mailbox
+ * @crypto_ops: supported crypto operations
+ * @crypto_enable_off: offset of crypto ops enable region
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
u32 repr_cap;
+ u32 mbox_cmsg_types;
+ u32 crypto_ops;
+ unsigned int crypto_enable_off;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 851e31e0ba8e..3a8e1af7042d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -150,8 +150,9 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 9
+#define NN_RVEC_GATHER_STATS 12
#define NN_RVEC_PER_Q_STATS 3
+#define NN_CTRL_PATH_STATS 1
#define SFP_SFF_REV_COMPLIANCE 1
@@ -423,7 +424,8 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
+ NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -446,6 +448,11 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
+ data = nfp_pr_et(data, "tx_tls_encrypted");
+ data = nfp_pr_et(data, "tx_tls_ooo");
+ data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
+
+ data = nfp_pr_et(data, "hw_tls_no_space");
return data;
}
@@ -478,6 +485,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[7] = nn->r_vecs[i].tx_gather;
tmp[8] = nn->r_vecs[i].tx_lso;
+ tmp[9] = nn->r_vecs[i].hw_tls_tx;
+ tmp[10] = nn->r_vecs[i].tls_tx_fallback;
+ tmp[11] = nn->r_vecs[i].tls_tx_no_fallback;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -489,6 +499,8 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
+ *data++ = atomic_read(&nn->ktls_no_space);
+
return data;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index fdbb3ce00e20..a391cf6ee4b2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -87,6 +87,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select NET_DEVLINK
---help---
This enables the support for ...
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 84cb62434556..58e2eaf77014 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3248,6 +3248,7 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
{
struct in_device *indev;
+ struct in_ifaddr *ifa;
if (!netxen_destip_supported(adapter))
return;
@@ -3256,7 +3257,8 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
if (!indev)
return;
- for_ifa(indev) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, indev) {
switch (event) {
case NETDEV_UP:
netxen_list_config_ip(adapter, ifa, NX_IP_UP);
@@ -3267,8 +3269,8 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
default:
break;
}
- } endfor_ifa(indev);
-
+ }
+ rcu_read_unlock();
in_dev_put(indev);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index c5e96ce20f59..89fe091c958d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -140,6 +140,7 @@ struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;
+struct qed_llh_info;
struct qed_rt_data {
u32 *init_val;
@@ -741,6 +742,7 @@ struct qed_dev {
#define QED_DEV_ID_MASK 0xff00
#define QED_DEV_ID_MASK_BB 0x1600
#define QED_DEV_ID_MASK_AH 0x8000
+#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -801,6 +803,11 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* Engine affinity */
+ u8 l2_affin_hint;
+ u8 fir_affin;
+ u8 iwarp_affin;
+
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
@@ -815,6 +822,10 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+ /* LLH info */
+ u8 ppfid_bitmap;
+ struct qed_llh_info *p_llh_info;
+
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
@@ -852,6 +863,9 @@ struct qed_dev {
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
u16 tunn_feature_mask;
+
+ struct devlink *dl;
+ bool iwarp_cmt;
};
#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
@@ -904,6 +918,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
+/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
+#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
+#define QED_IWARP_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->iwarp_affin])
+#define QED_AFFIN_HWFN(dev) \
+ (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
+ QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
+#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS (BIT(0))
@@ -923,8 +945,6 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
-
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index e61d1d905415..8e1bdf58b9e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2351,7 +2351,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
- reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ NULL);
if (elem_type == QED_ELEM_CXT) {
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@@ -2457,7 +2458,7 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
(u64) (uintptr_t) &ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
- 0);
+ NULL);
}
qed_ptt_release(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 979f1e4bc18b..8525e6bf6ae5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2537,7 +2537,7 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
(len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
wide_bus)) {
if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
- (u64)(uintptr_t)(dump_buf), len, 0))
+ (u64)(uintptr_t)(dump_buf), len, NULL))
return len;
dev_data->use_dmae = 0;
DP_VERBOSE(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index fccdb06fc5c5..eec7cb65c7e6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -361,6 +361,926 @@ void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
/******************** Doorbell Recovery end ****************/
+/********************************** NIG LLH ***********************************/
+
+enum qed_llh_filter_type {
+ QED_LLH_FILTER_TYPE_MAC,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+};
+
+struct qed_llh_mac_filter {
+ u8 addr[ETH_ALEN];
+};
+
+struct qed_llh_protocol_filter {
+ enum qed_llh_prot_filter_type_t type;
+ u16 source_port_or_eth_type;
+ u16 dest_port;
+};
+
+union qed_llh_filter {
+ struct qed_llh_mac_filter mac;
+ struct qed_llh_protocol_filter protocol;
+};
+
+struct qed_llh_filter_info {
+ bool b_enabled;
+ u32 ref_cnt;
+ enum qed_llh_filter_type type;
+ union qed_llh_filter filter;
+};
+
+struct qed_llh_info {
+ /* Number of LLH filters banks */
+ u8 num_ppfid;
+
+#define MAX_NUM_PPFID 8
+ u8 ppfid_array[MAX_NUM_PPFID];
+
+	/* Array of filter banks:
+	 * "num_ppfid" banks, each an array of
+	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
+	 */
+ struct qed_llh_filter_info **pp_filters;
+};
+
+static void qed_llh_free(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ u32 i;
+
+ if (p_llh_info) {
+ if (p_llh_info->pp_filters)
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
+ kfree(p_llh_info->pp_filters[i]);
+
+ kfree(p_llh_info->pp_filters);
+ }
+
+ kfree(p_llh_info);
+ cdev->p_llh_info = NULL;
+}
+
+static int qed_llh_alloc(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info;
+ u32 size, i;
+
+ p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
+ if (!p_llh_info)
+ return -ENOMEM;
+ cdev->p_llh_info = p_llh_info;
+
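+	/* Build a compact array of the absolute ppfids present in the
+	 * bitmap.
+	 */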
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
+ if (!(cdev->ppfid_bitmap & (0x1 << i)))
+ continue;
+
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ p_llh_info->num_ppfid, i);
+ p_llh_info->num_ppfid++;
+ }
+
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
+ p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters)
+ return -ENOMEM;
+
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(**p_llh_info->pp_filters);
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
+ p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_llh_shadow_sanity(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, const char *action)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
+ action, ppfid, p_llh_info->num_ppfid);
+ return -EINVAL;
+ }
+
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define QED_LLH_INVALID_FILTER_IDX 0xff
+
+static int
+qed_llh_shadow_search_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!memcmp(p_filter, &p_filters[i].filter,
+ sizeof(*p_filter))) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!p_filters[i].b_enabled) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ u8 filter_idx,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ p_filters[filter_idx].b_enabled = true;
+ p_filters[filter_idx].type = type;
+ memcpy(&p_filters[filter_idx].filter, p_filter,
+ sizeof(p_filters[filter_idx].filter));
+ }
+
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+	/* Check if the same filter already exists */
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* Find a new entry in case of a new filter */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
+ if (rc)
+ return rc;
+ }
+
+ /* No free entry was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev,
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
+ ppfid);
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
+ p_filter, p_ref_cnt);
+}
+
+static int
+__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ DP_NOTICE(cdev,
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
+ return -EINVAL;
+ }
+
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
+ if (!p_filters[filter_idx].ref_cnt)
+ memset(&p_filters[filter_idx],
+ 0, sizeof(p_filters[filter_idx]));
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* No matching filter was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
+ p_ref_cnt);
+}
+
+static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "ppfid %d is not valid, available indices are 0..%hhd\n",
+ ppfid, p_llh_info->num_ppfid - 1);
+ return -EINVAL;
+ }
+
+ *p_abs_ppfid = p_llh_info->ppfid_array[ppfid];
+
+ return 0;
+}
+
+static int
+qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_eng eng;
+ u8 ppfid;
+ int rc;
+
+ rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the engine affinity configuration\n");
+ return rc;
+ }
+
+ /* RoCE PF is bound to a single engine */
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ rc = qed_llh_set_roce_affinity(cdev, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ /* Storage PF is bound to a single engine while L2 PF uses both */
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ else /* L2_PERSONALITY */
+ eng = QED_BOTH_ENG;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return 0;
+}
+
+static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 ppfid, abs_ppfid;
+ int rc;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ u32 addr;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ return rc;
+
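+		/* Map each ppfid owned by this PF to its relative PF id
+		 * in the NIG.
+		 */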
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
+
+ if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+ if (rc)
+ DP_NOTICE(cdev,
+ "Failed to add an LLH filter with the primary MAC\n");
+ }
+
+ if (QED_IS_CMT(cdev)) {
+ rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
+{
+ return cdev->p_llh_info->num_ppfid;
+}
+
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
+
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
+ if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
+ cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 ppfid, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
+ 0xf); /* QP bit 15 */
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+struct qed_llh_filter_details {
+ u64 value;
+ u32 mode;
+ u32 protocol_type;
+ u32 hdr_sel;
+ u32 enable;
+};
+
+static int
+qed_llh_access_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx,
+ struct qed_llh_filter_details *p_details)
+{
+ struct qed_dmae_params params = {0};
+ u32 addr;
+ u8 pfid;
+ int rc;
+
+ /* The NIG/LLH registers that are accessed in this function have only 16
+	 * rows which are exposed to a PF, i.e. only the 16 filters of its
+	 * default ppfid. Accessing the filters of other ppfids requires
+	 * pretending to be other PFs.
+ * The calculation of PPFID->PFID in AH is based on the relative index
+ * of a PF on its port.
+ * For BB the pfid is actually the abs_ppfid.
+ */
+ if (QED_IS_BB(p_hwfn->cdev))
+ pfid = abs_ppfid;
+ else
+ pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
+ MFW_PORT(p_hwfn);
+
+ /* Filter enable - should be done first when removing a filter */
+ if (!p_details->enable) {
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ }
+
+ /* Filter value */
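+	/* The 64-bit value spans two consecutive dwords, hence the
+	 * doubled index.
+	 */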
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
+
+ params.flags = QED_DMAE_FLAG_PF_DST;
+ params.dst_pfid = pfid;
+ rc = qed_dmae_host2grc(p_hwfn,
+ p_ptt,
+ (u64)(uintptr_t)&p_details->value,
+ addr, 2 /* size_in_dwords */,
+ &params);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ /* Filter mode */
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->mode);
+
+ /* Filter protocol type */
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);
+
+ /* Filter header select */
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);
+
+ /* Filter enable - should be done last when adding a filter */
+ if (p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+ }
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ return 0;
+}
+
+static int
+qed_llh_add_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
+{
+ struct qed_llh_filter_details filter_details;
+
+ filter_details.enable = 1;
+ filter_details.value = ((u64)high << 32) | low;
+ filter_details.hdr_sel = 0;
+ filter_details.protocol_type = filter_prot_type;
+ /* Mode: 0: MAC-address classification 1: protocol classification */
+ filter_details.mode = filter_prot_type ? 1 : 0;
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+static int
+qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
+{
+ struct qed_llh_filter_details filter_details = {0};
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid;
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = qed_llh_shadow_add_filter(cdev, ppfid,
+ QED_LLH_FILTER_TYPE_MAC,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+	/* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
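+		/* Pack the MAC into the 64-bit filter value: bytes 0-1 in
+		 * the high word, bytes 2-5 in the low word.
+		 */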
+ high = mac_addr[1] | (mac_addr[0] << 8);
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
+ (mac_addr[2] << 24);
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ 0, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int
+qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u8 *str, size_t str_len)
+{
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ snprintf(str, str_len, "Ethertype 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ snprintf(str, str_len, "TCP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ snprintf(str, str_len, "UDP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Non valid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u32 *p_high, u32 *p_low)
+{
+ *p_high = 0;
+ *p_low = 0;
+
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ *p_high = source_port_or_eth_type;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ *p_low = source_port_or_eth_type << 16;
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ *p_low = dest_port;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Non valid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32], type_bitmap;
+ union qed_llh_filter filter = {};
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_add_filter(cdev,
+ ppfid,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+	/* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_protocol_filter_to_hilo(cdev, type,
+ source_port_or_eth_type,
+ dest_port, &high, &low);
+ if (rc)
+ goto err;
+
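+		/* The protocol type register takes a one-hot bitmap of the
+		 * filter type.
+		 */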
+ type_bitmap = 0x1 << type;
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, type_bitmap, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(p_hwfn,
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ ether_addr_copy(filter.mac.addr, mac_addr);
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+	/* Remove from the LLH if the filter is no longer in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32];
+ union qed_llh_filter filter = {};
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+	/* Remove from the LLH if the filter is no longer in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+/******************************* NIG LLH - End ********************************/
+
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
@@ -461,6 +1381,8 @@ void qed_resc_free(struct qed_dev *cdev)
kfree(cdev->reset_stats);
cdev->reset_stats = NULL;
+ qed_llh_free(cdev);
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1428,6 +2350,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ rc = qed_llh_alloc(cdev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the llh_info structure\n");
+ goto alloc_err;
+ }
+
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
if (!cdev->reset_stats)
goto alloc_no_mem;
@@ -1879,6 +2808,10 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
{
int rc = 0;
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
+
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
if (rc)
return rc;
@@ -1964,6 +2897,13 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* Use the leading hwfn since in CMT only NIG #0 is operational */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -2393,6 +3333,12 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
if (!cdev->recov_in_prog) {
rc = qed_mcp_unload_done(p_hwfn, p_ptt);
if (rc) {
@@ -2868,6 +3814,36 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
return 0;
}
+static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 native_ppfid_idx;
+ int rc;
+
+ /* Calculation of BB/AH is different for native_ppfid_idx */
+ if (QED_IS_BB(cdev))
+ native_ppfid_idx = p_hwfn->rel_pf_id;
+ else
+ native_ppfid_idx = p_hwfn->rel_pf_id /
+ cdev->num_ports_in_engine;
+
+ rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP)
+ return rc;
+ else if (rc == -EOPNOTSUPP)
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+
+ if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
+ native_ppfid_idx, cdev->ppfid_bitmap);
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+ }
+
+ return 0;
+}
+
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_unlock_params resc_unlock_params;
@@ -2925,6 +3901,13 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"Failed to release the resource lock for the resource allocation commands\n");
}
+ /* PPFID bitmap */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
/* Sanity for ILT */
if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
(!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
@@ -3443,6 +4426,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
+ u64 db_phys_addr,
enum qed_pci_personality personality)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -3451,6 +4435,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;
if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
@@ -3546,7 +4531,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
cdev->regview,
- cdev->doorbells, personality);
+ cdev->doorbells,
+ cdev->db_phys_addr,
+ personality);
if (rc)
return rc;
@@ -3555,22 +4542,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the rest of the hwfns */
if (cdev->num_hwfns > 1) {
void __iomem *p_regview, *p_doorbell;
- u8 __iomem *addr;
+ u64 db_phys_addr;
+ u32 offset;
/* adjust bar offset for second engine */
- addr = cdev->regview +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_0) / 2;
- p_regview = addr;
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = cdev->regview + offset;
+
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
- addr = cdev->doorbells +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_1) / 2;
- p_doorbell = addr;
+ p_doorbell = cdev->doorbells + offset;
+
+ db_phys_addr = cdev->db_phys_addr + offset;
/* prepare second hw function */
rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
- p_doorbell, personality);
+ p_doorbell, db_phys_addr,
+ personality);
/* in case of error, need to free the previously
 * initialized hwfn 0.
@@ -3951,269 +4941,6 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
return 0;
}
-static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
- u8 *p_filter)
-{
- *p_high = p_filter[1] | (p_filter[0] << 8);
- *p_low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-}
-
-int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter)
-{
- u32 high = 0, low = 0, en;
- int i;
-
- if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
- return 0;
-
- qed_llh_mac_to_filter(&high, &low, p_filter);
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn,
- "Failed to find an empty LLH filter to utilize\n");
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "mac: %pM is added at %d\n",
- p_filter, i);
-
- return 0;
-}
-
-void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter)
-{
- u32 high = 0, low = 0;
- int i;
-
- if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
- return;
-
- qed_llh_mac_to_filter(&high, &low, p_filter);
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "mac: %pM is removed from %d\n",
- p_filter, i);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
-}
-
-int
-qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port, enum qed_llh_port_filter_type_t type)
-{
- u32 high = 0, low = 0, en;
- int i;
-
- if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
- return 0;
-
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- case QED_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- case QED_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn,
- "Non valid LLH protocol filter type %d\n", type);
- return -EINVAL;
- }
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1 << type);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn,
- "Failed to find an empty LLH filter to utilize\n");
- return -EINVAL;
- }
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "ETH type %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP src port %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_UDP_SRC_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP src port %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port, i);
- break;
- case QED_LLH_FILTER_UDP_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port, i);
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
- break;
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
- break;
- }
- return 0;
-}
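
In the removed encoding above, the combined src/dst port filters pack both
ports into the low word: for src port 8080 (0x1f90) and dst port 443
(0x01bb), low = (0x1f90 << 16) | 0x01bb = 0x1f9001bb, while high stays
zero; an ethertype filter instead puts the ethertype in the high word.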
-
-void
-qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type)
-{
- u32 high = 0, low = 0;
- int i;
-
- if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
- return;
-
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- case QED_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- case QED_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn,
- "Non valid LLH protocol filter type %d\n", type);
- return;
- }
-
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
- continue;
- if (!qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
- continue;
- if (!(qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32)) & BIT(type)))
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
-}
-
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
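
The qed_hw_prepare() hunk above mirrors the virtual BAR split for the new
doorbell physical address: in CMT mode each BAR is halved between the two
engines, so the second hwfn gets base + size / 2 for both the mapping and
the physical base. A hedged sketch of the arithmetic (names hypothetical):

#include <stdint.h>

struct hwfn_db {
	uint64_t phys;	/* physical doorbell base */
	uint8_t *virt;	/* ioremapped doorbell base */
};

static void split_db_bar(uint64_t db_bar_phys, uint8_t *db_bar_virt,
			 uint32_t bar_size, struct hwfn_db fns[2])
{
	uint32_t offset = bar_size / 2;

	fns[0].phys = db_bar_phys;		/* engine 0: first half */
	fns[0].virt = db_bar_virt;
	fns[1].phys = db_bar_phys + offset;	/* engine 1: second half */
	fns[1].virt = db_bar_virt + offset;
}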
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e4b4e3b78e8a..47376d4d071f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -241,11 +241,17 @@ enum qed_dmae_address_type_t {
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_PORT 0x00000010
+#define QED_DMAE_FLAG_PF_SRC 0x00000020
+#define QED_DMAE_FLAG_PF_DST 0x00000040
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
};
/**
@@ -257,7 +263,7 @@ struct qed_dmae_params {
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
- * @param flags (one of the flags defined above)
+ * @param p_params (default parameters will be used in case of NULL)
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -265,7 +271,7 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
- u32 flags);
+ struct qed_dmae_params *p_params);
/**
* @brief qed_dmae_grc2host - Read data from dmae data offset
@@ -275,11 +281,11 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
- * @param flags - one of the flags defined above
+ * @param p_params (default parameters will be used in case of NULL)
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
- u32 flags);
+ struct qed_dmae_params *p_params);
/**
* @brief qed_dmae_host2host - copy data from to source address
@@ -290,7 +296,7 @@ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
* @param source_addr
* @param dest_addr
* @param size_in_dwords
- * @param params
+ * @param p_params (default parameters will be used in case of NULL)
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -368,26 +374,66 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
- * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ * @brief qed_llh_get_num_ppfid - Return the number of LLH filter banks
+ * that are allocated to the PF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to add
+ * @param cdev
+ *
+ * @return u8 - Number of LLH filter banks
*/
-int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter);
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
+
+enum qed_eng {
+ QED_ENG0,
+ QED_ENG1,
+ QED_BOTH_ENG,
+};
/**
- * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
+ * LLH filter bank.
+ *
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param eng
+ *
+ * @return int
+ */
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
+ u8 ppfid, enum qed_eng eng);
+
+/**
+ * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ *
+ * @param cdev
+ * @param eng
+ *
+ * @return int
+ */
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
+
+/**
+ * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
+
+/**
+ * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
+ * filter bank.
*
- * @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to remove
*/
-void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter);
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
-enum qed_llh_port_filter_type_t {
+enum qed_llh_prot_filter_type_t {
QED_LLH_FILTER_ETHERTYPE,
QED_LLH_FILTER_TCP_SRC_PORT,
QED_LLH_FILTER_TCP_DEST_PORT,
@@ -398,36 +444,37 @@ enum qed_llh_port_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh
+ * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
*/
int
-qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type);
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - remove a protocol filter in llh
+ * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
*/
void
-qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type);
+qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
* @brief Cleanup of previous driver remains prior to load
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 46dc93d3b9b5..de31a382f58e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -745,7 +745,7 @@ struct qed_hash_fcoe_con {
static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
struct qed_dev_fcoe_info *info)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
int rc;
memset(info, 0, sizeof(*info));
@@ -806,15 +806,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
if (!p_ptt)
return -EAGAIN;
/* Stop the fcoe */
- rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
+ rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);
return rc;
}
@@ -828,8 +828,8 @@ static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
return 0;
}
- rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL);
+ rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
if (rc) {
DP_NOTICE(cdev, "Failed to start fcoe\n");
return rc;
@@ -849,7 +849,7 @@ static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
return -ENOMEM;
}
- rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
if (rc) {
DP_NOTICE(cdev, "Failed to gather task information\n");
qed_fcoe_stop(cdev);
@@ -884,7 +884,7 @@ static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
}
/* Acquire the connection */
- rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
&hash_con->con);
if (rc) {
DP_NOTICE(cdev, "Failed to acquire Connection\n");
@@ -898,7 +898,7 @@ static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
hash_add(cdev->connections, &hash_con->node, *handle);
if (p_doorbell)
- *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
+ *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
*handle);
return 0;
@@ -916,7 +916,7 @@ static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
}
hlist_del(&hash_con->node);
- qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
kfree(hash_con);
return 0;
@@ -971,7 +971,7 @@ static int qed_fcoe_offload_conn(struct qed_dev *cdev,
con->d_id.addr_mid = conn_info->d_id.addr_mid;
con->d_id.addr_lo = conn_info->d_id.addr_lo;
- return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -992,13 +992,13 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
con = hash_con->con;
con->terminate_params = terminate_params;
- return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
- return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
+ return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37edaa847512..e054f6c69e3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12612,8 +12612,10 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
+#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
+#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
@@ -12802,6 +12804,18 @@ struct public_drv_mb {
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
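
The FW_MB_PARAM_* fields above are plain mask-and-shift bitfields; the
qed_mcp.c hunk at the end of this patch reads them with QED_MFW_GET_FIELD.
A minimal sketch of that idiom, assuming the driver's token-pasting helper
looks like this:

#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

/* e.g. mcp_param = 0x0000000c:
 * (0xc & FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK) >> 3 == 1,
 * i.e. the L2 affinity hint points at engine 1.
 */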
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 72ec1c6bdf70..a4de9e3ef72c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -392,11 +392,15 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
}
/* DMAE */
+#define QED_DMAE_FLAGS_IS_SET(params, flag) \
+ ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u8 src_pfid, dst_pfid, port_id;
u16 opcode_b = 0;
u32 opcode = 0;
@@ -407,14 +411,18 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
- opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
- opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT);
/* Whether to write a completion word to the completion destination:
@@ -425,12 +433,14 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
- if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
- opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
+ p_params->port_id : p_hwfn->port_id;
+ opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
/* reset source address in next go */
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
@@ -441,7 +451,7 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
DMAE_CMD_DST_ADDR_RESET_SHIFT);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
} else {
@@ -449,7 +459,7 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
DMAE_CMD_SRC_VF_ID_SHIFT;
}
- if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
@@ -733,7 +743,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
- if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
if (src_type == QED_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
@@ -771,14 +781,12 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct qed_dmae_params params;
int rc;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = flags;
mutex_lock(&p_hwfn->dmae_info.mutex);
@@ -786,7 +794,7 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
grc_addr_in_dw,
QED_DMAE_ADDRESS_HOST_VIRT,
QED_DMAE_ADDRESS_GRC,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
mutex_unlock(&p_hwfn->dmae_info.mutex);
@@ -796,21 +804,19 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+ dma_addr_t dest_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct qed_dmae_params params;
int rc;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = flags;
mutex_lock(&p_hwfn->dmae_info.mutex);
rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, QED_DMAE_ADDRESS_GRC,
QED_DMAE_ADDRESS_HOST_VIRT,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
mutex_unlock(&p_hwfn->dmae_info.mutex);
@@ -842,7 +848,6 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
u32 size = PAGE_SIZE / 2, val;
- struct qed_dmae_params params;
int rc = 0;
dma_addr_t p_phys;
void *p_virt;
@@ -875,9 +880,8 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
(u64)p_phys,
p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
- memset(&params, 0, sizeof(params));
rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
- size / 4 /* size_in_dwords */, &params);
+ size / 4, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 34193c2f1699..a868d7f88601 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -131,7 +131,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
- addr + (i << 2), segment, 0);
+ addr + (i << 2), segment, NULL);
if (rc)
return rc;
@@ -194,7 +194,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
} else {
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(buf + dmae_data_offset),
- addr, size, 0);
+ addr, size, NULL);
}
return rc;
@@ -205,6 +205,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct qed_dmae_params params = {};
memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
@@ -214,10 +215,10 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfb->temp_data,
* 4. fill_count
*/
-
+ params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, &params);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index fdfedbc8e431..4e8118a08654 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1508,10 +1508,10 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2, NULL);
qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2, NULL);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -2362,7 +2362,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2376,7 +2376,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- sb_id * sizeof(u64), 2, 0);
+ sb_id * sizeof(u64), 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 4f8a685d1a55..5585c18053ec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1082,7 +1082,7 @@ struct qed_hash_iscsi_con {
static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
struct qed_dev_iscsi_info *info)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
int rc;
@@ -1141,8 +1141,8 @@ static int qed_iscsi_stop(struct qed_dev *cdev)
}
/* Stop the iscsi */
- rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL);
+ rc = qed_sp_iscsi_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
return rc;
@@ -1161,9 +1161,8 @@ static int qed_iscsi_start(struct qed_dev *cdev,
return 0;
}
- rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL, event_context,
- async_event_cb);
+ rc = qed_sp_iscsi_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL, event_context, async_event_cb);
if (rc) {
DP_NOTICE(cdev, "Failed to start iscsi\n");
return rc;
@@ -1182,8 +1181,7 @@ static int qed_iscsi_start(struct qed_dev *cdev,
return -ENOMEM;
}
- rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev),
- tid_info);
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
if (rc) {
DP_NOTICE(cdev, "Failed to gather task information\n");
qed_iscsi_stop(cdev);
@@ -1215,7 +1213,7 @@ static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
return -ENOMEM;
/* Acquire the connection */
- rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ rc = qed_iscsi_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
&hash_con->con);
if (rc) {
DP_NOTICE(cdev, "Failed to acquire Connection\n");
@@ -1229,7 +1227,7 @@ static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
hash_add(cdev->connections, &hash_con->node, *handle);
if (p_doorbell)
- *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev),
+ *p_doorbell = qed_iscsi_get_db_addr(QED_AFFIN_HWFN(cdev),
*handle);
return 0;
@@ -1247,7 +1245,7 @@ static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
}
hlist_del(&hash_con->node);
- qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ qed_iscsi_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
kfree(hash_con);
return 0;
@@ -1324,7 +1322,7 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
/* Set default values on other connection fields */
con->offl_flags = 0x1;
- return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_iscsi_conn_offload(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1351,7 +1349,7 @@ static int qed_iscsi_update_conn(struct qed_dev *cdev,
con->first_seq_length = conn_info->first_seq_length;
con->exp_stat_sn = conn_info->exp_stat_sn;
- return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_iscsi_conn_update(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1366,8 +1364,7 @@ static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
return -EINVAL;
}
- return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1385,14 +1382,13 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
hash_con->con->abortive_dsconnect = abrt_conn;
- return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
{
- return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
+ return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
static int qed_iscsi_change_mac(struct qed_dev *cdev,
@@ -1407,8 +1403,7 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
return -EINVAL;
}
- return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_mac_update(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ded556b7bab5..7c71ea15251f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2528,7 +2528,7 @@ qed_iwarp_ll2_slowpath(void *cxt,
memset(fpdu, 0, sizeof(*fpdu));
}
-static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
{
struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
int rc = 0;
@@ -2563,8 +2563,9 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
}
- qed_llh_remove_mac_filter(p_hwfn,
- p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
+ p_hwfn->p_rdma_info->iwarp.mac_addr);
+
return rc;
}
@@ -2608,8 +2609,7 @@ qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params,
- struct qed_ptt *p_ptt)
+ struct qed_rdma_start_in_params *params)
{
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
@@ -2628,7 +2628,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
- rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
if (rc)
return rc;
@@ -2653,7 +2653,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
- qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
return rc;
}
@@ -2757,12 +2757,12 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
&iwarp_info->mpa_buf_list);
return rc;
err:
- qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+ qed_iwarp_ll2_stop(p_hwfn);
return rc;
}
-int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params)
{
struct qed_iwarp_info *iwarp_info;
@@ -2794,10 +2794,10 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_iwarp_async_event);
qed_ooo_setup(p_hwfn);
- return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
+ return qed_iwarp_ll2_start(p_hwfn, params);
}
-int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{
int rc;
@@ -2808,7 +2808,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
- return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+ return qed_iwarp_ll2_stop(p_hwfn);
}
static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 7ac959038324..c1b2057d23b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -183,13 +183,13 @@ struct qed_iwarp_listener {
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
-int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod);
-int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn);
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 57641728df69..9f36e7948222 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2111,7 +2111,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2144,7 +2144,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index b5f419b71287..19a1a58d60f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -239,9 +239,8 @@ out_post1:
buffer->phys_addr = new_phys_addr;
out_post:
- rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
- buffer->phys_addr, 0, buffer, 1);
-
+ rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
if (rc)
qed_ll2_dealloc_buffer(cdev, buffer);
}
@@ -926,16 +925,15 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return 0;
}
-static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
- *handle);
+ DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
+ "Stopping LL2 OOO queue [%02x]\n", *handle);
- qed_ll2_terminate_connection(hwfn, *handle);
- qed_ll2_release_connection(hwfn, *handle);
+ qed_ll2_terminate_connection(p_hwfn, *handle);
+ qed_ll2_release_connection(p_hwfn, *handle);
*handle = QED_LL2_UNUSED_HANDLE;
}
@@ -1574,12 +1572,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FCOE, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FIP, 0,
- QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
}
out:
@@ -1980,12 +1978,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FCOE, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FIP, 0,
- QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
}
out:
@@ -2086,12 +2084,12 @@ static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
sizeof(port_stats));
- p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
- p_stats->gsi_invalid_pkt_length =
+ p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+ p_stats->gsi_invalid_pkt_length +=
HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
- p_stats->gsi_unsupported_pkt_typ =
+ p_stats->gsi_unsupported_pkt_typ +=
HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
- p_stats->gsi_crcchksm_error =
+ p_stats->gsi_crcchksm_error +=
HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
@@ -2109,9 +2107,9 @@ static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
- p_stats->packet_too_big_discard =
+ p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(tstats.packet_too_big_discard);
- p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+ p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
@@ -2128,12 +2126,12 @@ static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
- p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
@@ -2150,23 +2148,21 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
- p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
-int qed_ll2_get_stats(void *cxt,
- u8 connection_handle, struct qed_ll2_stats *p_stats)
+static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
+ struct qed_ll2_stats *p_stats)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt;
- memset(p_stats, 0, sizeof(*p_stats));
-
if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
!p_hwfn->p_ll2_info)
return -EINVAL;
@@ -2181,15 +2177,26 @@ int qed_ll2_get_stats(void *cxt,
if (p_ll2_conn->input.gsi_enable)
_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
+
_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
if (p_ll2_conn->tx_stats_en)
_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
qed_ptt_release(p_hwfn, p_ptt);
+
return 0;
}
+int qed_ll2_get_stats(void *cxt,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ memset(p_stats, 0, sizeof(*p_stats));
+ return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
+}
+
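
The '=' to '+=' conversions in the storm-stat readers above pair with this
new wrapper: qed_ll2_get_stats() zeroes the struct exactly once, so the CMT
storage path further below can call __qed_ll2_get_stats() once per engine
and end up with an aggregate, roughly:

	/* sketch: sum stats from engine 1 (affined) and engine 0 */
	memset(&stats, 0, sizeof(stats));
	__qed_ll2_get_stats(QED_AFFIN_HWFN(cdev), handle, &stats);
	__qed_ll2_get_stats(QED_LEADING_HWFN(cdev), handle, &stats);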
static void qed_ll2b_release_rx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -2216,7 +2223,7 @@ struct qed_ll2_cbs ll2_cbs = {
.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
-static void qed_ll2_set_conn_data(struct qed_dev *cdev,
+static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
struct qed_ll2_acquire_data *data,
struct qed_ll2_params *params,
enum qed_ll2_conn_type conn_type,
@@ -2232,7 +2239,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
data->input.tx_num_desc = QED_LL2_TX_SIZE;
data->p_connection_handle = handle;
data->cbs = &ll2_cbs;
- ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
+ ll2_cbs.cookie = p_hwfn;
if (lb) {
data->input.tx_tc = PKT_LB_TC;
@@ -2243,74 +2250,102 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
}
}
-static int qed_ll2_start_ooo(struct qed_dev *cdev,
+static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_params *params)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_acquire_data data;
int rc;
- qed_ll2_set_conn_data(cdev, &data, params,
+ qed_ll2_set_conn_data(p_hwfn, &data, params,
QED_LL2_TYPE_OOO, handle, true);
- rc = qed_ll2_acquire_connection(hwfn, &data);
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+ DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
goto out;
}
- rc = qed_ll2_establish_connection(hwfn, *handle);
+ rc = qed_ll2_establish_connection(p_hwfn, *handle);
if (rc) {
- DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
+ DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
- qed_ll2_release_connection(hwfn, *handle);
+ qed_ll2_release_connection(p_hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
-static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
- struct qed_ll2_buffer *buffer, *tmp_buffer;
- enum qed_ll2_conn_type conn_type;
- struct qed_ll2_acquire_data data;
- struct qed_ptt *p_ptt;
- int rc, i;
+ return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
+}
+static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
- /* Initialize LL2 locks & lists */
- INIT_LIST_HEAD(&cdev->ll2->list);
- spin_lock_init(&cdev->ll2->lock);
- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
- L1_CACHE_BYTES + params->mtu;
+ rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
- /*Allocate memory for LL2 */
- DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
- cdev->ll2->rx_size);
- for (i = 0; i < QED_LL2_RX_SIZE; i++) {
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
- goto fail;
- }
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
- rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
- &buffer->phys_addr);
- if (rc) {
- kfree(buffer);
- goto fail;
- }
+ return rc;
+}
- list_add_tail(&buffer->list, &cdev->ll2->list);
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc = 0, rc2 = 0;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+ if (rc2)
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to stop LL2 on engine 0\n");
}
- switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ rc = __qed_ll2_stop(p_hwfn);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to stop LL2\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc | rc2;
+}
+
+static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_params *params)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ll2_acquire_data data;
+ int rc, rx_cnt;
+
+ switch (p_hwfn->hw_info.personality) {
case QED_PCI_FCOE:
conn_type = QED_LL2_TYPE_FCOE;
break;
@@ -2321,33 +2356,34 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
conn_type = QED_LL2_TYPE_ROCE;
break;
default:
conn_type = QED_LL2_TYPE_TEST;
}
- qed_ll2_set_conn_data(cdev, &data, params, conn_type,
+ qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
&cdev->ll2->handle, false);
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 connection\n");
- goto fail;
+ DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
+ return rc;
}
- rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle);
+ rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
if (rc) {
- DP_INFO(cdev, "Failed to establish LL2 connection\n");
- goto release_fail;
+ DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
+ goto release_conn;
}
/* Post all Rx buffers to FW */
spin_lock_bh(&cdev->ll2->lock);
+ rx_cnt = cdev->ll2->rx_cnt;
list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
- rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ rc = qed_ll2_post_rx_buffer(p_hwfn,
cdev->ll2->handle,
buffer->phys_addr, 0, buffer, 1);
if (rc) {
- DP_INFO(cdev,
+ DP_INFO(p_hwfn,
"Failed to post an Rx buffer; Deleting it\n");
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);
@@ -2355,100 +2391,127 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
list_del(&buffer->list);
kfree(buffer);
} else {
- cdev->ll2->rx_cnt++;
+ rx_cnt++;
}
}
spin_unlock_bh(&cdev->ll2->lock);
- if (!cdev->ll2->rx_cnt) {
- DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
- goto release_terminate;
+ if (rx_cnt == cdev->ll2->rx_cnt) {
+ DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
+ goto terminate_conn;
}
+ cdev->ll2->rx_cnt = rx_cnt;
+
+ return 0;
+
+terminate_conn:
+ qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+release_conn:
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
+ return rc;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ struct qed_ll2_buffer *buffer;
+ int rx_num_desc, i, rc;
if (!is_valid_ether_addr(params->ll2_mac_address)) {
- DP_INFO(cdev, "Invalid Ethernet address\n");
- goto release_terminate;
+ DP_NOTICE(cdev, "Invalid Ethernet address\n");
+ return -EINVAL;
}
- if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
- DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
- rc = qed_ll2_start_ooo(cdev, params);
+ WARN_ON(!cdev->ll2->cbs);
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+
+ /* Allocate memory for LL2.
+ * In CMT mode, a storage PF affinitized to engine 1 also starts LL2 on
+ * engine 0, so twice the buffers are needed.
+ */
+ rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
+ DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
+ rx_num_desc, cdev->ll2->rx_size);
+ for (i = 0; i < rx_num_desc; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
if (rc) {
- DP_INFO(cdev,
- "Failed to initialize the OOO LL2 queue\n");
- goto release_terminate;
+ kfree(buffer);
+ goto err0;
}
- }
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_INFO(cdev, "Failed to acquire PTT\n");
- goto release_terminate;
+ list_add_tail(&buffer->list, &cdev->ll2->list);
}
- rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- params->ll2_mac_address);
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ rc = __qed_ll2_start(p_hwfn, params);
if (rc) {
- DP_ERR(cdev, "Failed to allocate LLH filter\n");
- goto release_terminate_all;
+ DP_NOTICE(cdev, "Failed to start LL2\n");
+ goto err0;
}
- ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
- return 0;
-
-release_terminate_all:
-
-release_terminate:
- qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
-release_fail:
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
-fail:
- qed_ll2_kill_buffers(cdev);
- cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
- return -EINVAL;
-}
-
-static int qed_ll2_stop(struct qed_dev *cdev)
-{
- struct qed_ptt *p_ptt;
- int rc;
-
- if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
- return 0;
+ /* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
+ * since broadcast/multicast packets are routed to engine 0.
+ */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to start LL2 on engine 0\n");
+ goto err1;
+ }
+ }
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_INFO(cdev, "Failed to acquire PTT\n");
- goto fail;
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+ rc = qed_ll2_start_ooo(p_hwfn, params);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start OOO LL2\n");
+ goto err2;
+ }
}
- qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- cdev->ll2_mac_address);
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
- eth_zero_addr(cdev->ll2_mac_address);
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
- if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
- qed_ll2_stop_ooo(cdev);
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
- rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle);
- if (rc)
- DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+ return 0;
+err3:
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+err2:
+ if (b_is_storage_eng1)
+ __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+err1:
+ __qed_ll2_stop(p_hwfn);
+err0:
qed_ll2_kill_buffers(cdev);
-
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
-
return rc;
-fail:
- return -EINVAL;
}
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags)
{
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
u8 flags = 0, nr_frags;
@@ -2506,7 +2569,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
* routine may run and free the SKB, so no dereferencing the SKB
* beyond this point unless skb has any fragments.
*/
- rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
if (rc)
goto err;
@@ -2524,13 +2587,13 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
goto err;
}
- rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
cdev->ll2->handle,
mapping,
skb_frag_size(frag));
/* if failed not much to do here, partial packet has been posted
- * we can't free memory, will need to wait for completion.
+ * we can't free memory, will need to wait for completion
*/
if (rc)
goto err2;
@@ -2540,18 +2603,37 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
err:
dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
-
err2:
return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
if (!cdev->ll2)
return -EINVAL;
- return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle, stats);
+ rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
+ return rc;
+ }
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to get LL2 stats on engine 0\n");
+ return rc;
+ }
+ }
+
+ return 0;
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 6de23b56b294..829dd60ab937 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -48,6 +48,7 @@
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
+#include <net/devlink.h>
#include "qed.h"
#include "qed_sriov.h"
@@ -342,6 +343,107 @@ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
return 0;
}
+struct qed_devlink {
+ struct qed_dev *cdev;
+};
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl;
+ struct qed_dev *cdev;
+
+ qed_dl = devlink_priv(dl);
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl;
+ struct qed_dev *cdev;
+
+ qed_dl = devlink_priv(dl);
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
+
+static const struct devlink_ops qed_dl_ops;
+
+static int qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qed_dl;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
+ if (!dl)
+ return -ENOMEM;
+
+ qed_dl = devlink_priv(dl);
+
+ cdev->dl = dl;
+ qed_dl->cdev = cdev;
+
+ rc = devlink_register(dl, &cdev->pdev->dev);
+ if (rc)
+ goto err_free;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ devlink_params_publish(dl);
+ cdev->iwarp_cmt = false;
+
+ return 0;
+
+err_unregister:
+ devlink_unregister(dl);
+
+err_free:
+ cdev->dl = NULL;
+ devlink_free(dl);
+
+ return rc;
+}
+
+static void qed_devlink_unregister(struct qed_dev *cdev)
+{
+ if (!cdev->dl)
+ return;
+
+ devlink_params_unregister(cdev->dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_unregister(cdev->dl);
+ devlink_free(cdev->dl);
+}
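
Once registered, the runtime parameter above is driven from user space with
the standard devlink tool, e.g. (PCI address hypothetical):
"devlink dev param set pci/0000:04:00.0 name iwarp_cmt value true cmode
runtime", and read back with "devlink dev param show pci/0000:04:00.0 name
iwarp_cmt".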
+
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@ -370,6 +472,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
}
DP_INFO(cdev, "PCI init completed successfully\n");
+ rc = qed_devlink_register(cdev);
+ if (rc) {
+ DP_INFO(cdev, "Failed to register devlink.\n");
+ goto err2;
+ }
+
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@ -399,6 +507,8 @@ static void qed_remove(struct qed_dev *cdev)
qed_set_power_state(cdev, PCI_D3hot);
+ qed_devlink_unregister(cdev);
+
qed_free_cdev(cdev);
}
@@ -1301,26 +1411,21 @@ static u32 qed_sb_init(struct qed_dev *cdev,
{
struct qed_hwfn *p_hwfn;
struct qed_ptt *p_ptt;
- int hwfn_index;
u16 rel_sb_id;
- u8 n_hwfns;
u32 rc;
- /* RoCE uses single engine and CMT uses two engines. When using both
- * we force only a single engine. Storage uses only engine 0 too.
- */
- if (type == QED_SB_TYPE_L2_QUEUE)
- n_hwfns = cdev->num_hwfns;
- else
- n_hwfns = 1;
-
- hwfn_index = sb_id % n_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
- rel_sb_id = sb_id / n_hwfns;
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
- hwfn_index, rel_sb_id, sb_id);
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
if (IS_PF(p_hwfn->cdev)) {
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1339,20 +1444,26 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info, u16 sb_id)
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
- int hwfn_index;
u16 rel_sb_id;
u32 rc;
- hwfn_index = sb_id % cdev->num_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
- rel_sb_id = sb_id / cdev->num_hwfns;
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
- hwfn_index, rel_sb_id, sb_id);
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
@@ -2372,6 +2483,11 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
return rc;
}
+static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
+{
+ return QED_AFFIN_HWFN_IDX(cdev);
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2419,6 +2535,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.db_recovery_add = &qed_db_recovery_add,
.db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
+ .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
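The iwarp_cmt knob registered above is a runtime devlink parameter, so it can be flipped from userspace without reloading the driver. A minimal usage sketch with the iproute2 devlink tool; the PCI address below is illustrative:

    devlink dev param show pci/0000:05:00.0 name iwarp_cmt
    devlink dev param set pci/0000:05:00.0 name iwarp_cmt value true cmode runtime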
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cc27fd60d689..758702c1ce9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3685,3 +3685,68 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
+
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 fir_valid, l2_valid;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ cdev->fir_affin =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ cdev->l2_affin_hint =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
+
+ return 0;
+}
+
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ cdev->ppfid_bitmap);
+
+ return 0;
+}
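Both helpers above pull fields out of the mailbox response with QED_MFW_GET_FIELD, which this patch does not define. Assuming the usual kernel mask/shift idiom, where each field is described by paired _MASK and _SHIFT constants, the extraction presumably reduces to the following sketch (all names here are illustrative):

    #include <linux/types.h>

    /* Mask/shift field extraction as QED_MFW_GET_FIELD is assumed to do. */
    #define EXAMPLE_FIELD_MASK	0x000000f0
    #define EXAMPLE_FIELD_SHIFT	4

    static inline u32 example_get_field(u32 mcp_param)
    {
    	return (mcp_param & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;
    }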
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 261c1a392e2c..e4f8fe4bd062 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -1186,4 +1186,20 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+/**
+ * @brief Get the engine affinity configuration.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the PPFID bitmap.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 1302b308bd87..f3ebdc5e8f85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -157,7 +157,8 @@ static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
*timestamp = 0;
val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
if (!(val & QED_TIMESTAMP_MASK)) {
- DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ "Invalid Tx timestamp, buf_seqid = %08x\n", val);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 7873d6dfd91f..f900fde448db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -700,7 +700,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return rc;
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
- rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
+ rc = qed_iwarp_setup(p_hwfn, params);
if (rc)
return rc;
} else {
@@ -742,7 +742,7 @@ static int qed_rdma_stop(void *rdma_cxt)
(ll2_ethertype_en & 0xFFFE));
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
- rc = qed_iwarp_stop(p_hwfn, p_ptt);
+ rc = qed_iwarp_stop(p_hwfn);
if (rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size));
- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size);
@@ -818,14 +818,17 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+ struct qed_mcp_link_state *p_link_output;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
- /* Link may have changed */
- p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+ /* The link state is saved only for the leading hwfn */
+ p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
- p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+ p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
+ : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_link_output->speed;
p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
@@ -870,7 +873,7 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
struct qed_dev_rdma_info *info)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
memset(info, 0, sizeof(*info));
@@ -889,9 +892,9 @@ static int qed_rdma_get_sb_start(struct qed_dev *cdev)
int feat_num;
if (cdev->num_hwfns > 1)
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
else
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
cdev->num_hwfns;
return feat_num;
@@ -899,7 +902,7 @@ static int qed_rdma_get_sb_start(struct qed_dev *cdev)
static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
- int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
int n_msix = cdev->int_params.rdma_msix_cnt;
return min_t(int, n_cnq, n_msix);
@@ -1653,7 +1656,7 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
- return QED_LEADING_HWFN(cdev);
+ return QED_AFFIN_HWFN(cdev);
}
static int qed_rdma_modify_srq(void *rdma_cxt,
@@ -1881,7 +1884,7 @@ err:
static int qed_rdma_init(struct qed_dev *cdev,
struct qed_rdma_start_in_params *params)
{
- return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+ return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
}
static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
@@ -1899,23 +1902,12 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
u8 *new_mac_address)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
int rc = 0;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to acquire PTT\n");
- return -EINVAL;
- }
-
if (old_mac_address)
- qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
+ qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
if (new_mac_address)
- rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
-
- qed_ptt_release(p_hwfn, p_ptt);
+ rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
if (rc)
DP_ERR(cdev,
@@ -1924,6 +1916,36 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
return rc;
}
+static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
+{
+ enum qed_eng eng;
+ u8 ppfid = 0;
+ int rc;
+
+ /* Make sure iwarp cmt mode is enabled before setting affinity */
+ if (!cdev->iwarp_cmt)
+ return -EINVAL;
+
+ if (b_reset)
+ eng = QED_BOTH_ENG;
+ else
+ eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;
+
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
+		   "LLH: Set the engine affinity of non-RoCE packets to %d\n",
+ eng);
+
+ return 0;
+}
+
static const struct qed_rdma_ops qed_rdma_ops_pass = {
.common = &qed_common_ops_pass,
.fill_dev_info = &qed_fill_rdma_dev_info,
@@ -1963,6 +1985,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
.ll2_get_stats = &qed_ll2_get_stats,
+ .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
.iwarp_connect = &qed_iwarp_connect,
.iwarp_create_listen = &qed_iwarp_create_listen,
.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 5ce825ca5f24..60f850c3bdd6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -254,6 +254,10 @@
0x500840UL
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
0x50196cUL
+#define NIG_REG_LLH_PPFID2PFID_TBL_0 \
+ 0x501970UL
+#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL \
+ 0x50
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
@@ -1626,6 +1630,8 @@
#define PHY_PCIE_REG_PHY1_K2_E5 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
+#define NIG_REG_PPF_TO_ENGINE_SEL_SIZE 8
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 5a495fda9e9d..7e0b795230b2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -588,7 +588,7 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 2f318aaf2b05..78f77b712b10 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -917,10 +917,11 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
/* Configure igu sb in CAU which were marked valid */
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+
qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- p_block->igu_sb_id * sizeof(u64), 2, 0);
+ p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
vf->num_sbs = (u8) num_rx_queues;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 92fe226980fd..b972ab07c18b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -92,6 +92,7 @@ struct qede_stats_common {
u64 non_coalesced_pkts;
u64 coalesced_bytes;
u64 link_change_count;
+ u64 ptp_skip_txts;
/* port */
u64 rx_64_byte_packets;
@@ -189,6 +190,7 @@ struct qede_dev {
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
+ u64 ptp_skip_txts;
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8911a97ab0ca..e85f9fef930c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -174,6 +174,7 @@ static const struct {
QEDE_STAT(coalesced_bytes),
QEDE_STAT(link_change_count),
+ QEDE_STAT(ptp_skip_txts),
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 02a97c659e29..741377b7c00d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -390,6 +390,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_common->brb_discards = stats.common.brb_discards;
p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
p_common->link_change_count = stats.common.link_change_count;
+ p_common->ptp_skip_txts = edev->ptp_skip_txts;
if (QEDE_IS_BB(edev)) {
struct qede_stats_bb *p_bb = &edev->stats.bb;
@@ -1306,7 +1307,8 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
u16 sb_id)
{
if (sb_info->sb_virt) {
- edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
memset(sb_info, 0, sizeof(*sb_info));
@@ -2231,6 +2233,8 @@ out:
if (mode != QEDE_UNLOAD_RECOVERY)
DP_NOTICE(edev, "Link is down\n");
+ edev->ptp_skip_txts = 0;
+
DP_INFO(edev, "Ending qede unload\n");
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index bddb2b5982dc..f815435cf106 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include "qede_ptp.h"
+#define QEDE_PTP_TX_TIMEOUT (2 * HZ)
struct qede_ptp {
const struct qed_eth_ptp_ops *ops;
@@ -38,6 +39,7 @@ struct qede_ptp {
struct timecounter tc;
struct ptp_clock *clock;
struct work_struct work;
+ unsigned long ptp_tx_start;
struct qede_dev *edev;
struct sk_buff *tx_skb;
@@ -160,18 +162,30 @@ static void qede_ptp_task(struct work_struct *work)
struct qede_dev *edev;
struct qede_ptp *ptp;
u64 timestamp, ns;
+ bool timedout;
int rc;
ptp = container_of(work, struct qede_ptp, work);
edev = ptp->edev;
+ timedout = time_is_before_jiffies(ptp->ptp_tx_start +
+ QEDE_PTP_TX_TIMEOUT);
/* Read Tx timestamp registers */
spin_lock_bh(&ptp->lock);
rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
spin_unlock_bh(&ptp->lock);
if (rc) {
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&ptp->work);
+ if (unlikely(timedout)) {
+ DP_INFO(edev, "Tx timestamp is not recorded\n");
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags);
+ edev->ptp_skip_txts++;
+ } else {
+ /* Reschedule to keep checking for a valid TS value */
+ schedule_work(&ptp->work);
+ }
return;
}
@@ -514,19 +528,28 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (!ptp)
return;
- if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags)) {
+ DP_ERR(edev, "Timestamping in progress\n");
+ edev->ptp_skip_txts++;
return;
+ }
if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
- DP_NOTICE(edev,
- "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ DP_ERR(edev,
+ "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
} else if (unlikely(ptp->tx_skb)) {
- DP_NOTICE(edev,
- "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ DP_ERR(edev,
+ "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
ptp->tx_skb = skb_get(skb);
+ ptp->ptp_tx_start = jiffies;
schedule_work(&ptp->work);
}
}
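The timeout added here follows the standard jiffies idiom: stamp ptp_tx_start when the work is first scheduled, then on each poll test whether start-plus-timeout is already in the past. A self-contained sketch of the check:

    #include <linux/jiffies.h>

    /* Sketch: true once more than two seconds (cf. QEDE_PTP_TX_TIMEOUT)
     * have elapsed since @start. time_is_before_jiffies() expands to
     * time_after(jiffies, ...), so it is safe across jiffies wrap-around.
     */
    static bool example_timed_out(unsigned long start)
    {
    	return time_is_before_jiffies(start + 2 * HZ);
    }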
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7a873002e626..c07438db30ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4119,13 +4119,14 @@ static void
qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
struct net_device *dev, unsigned long event)
{
+ const struct in_ifaddr *ifa;
struct in_device *indev;
indev = in_dev_get(dev);
if (!indev)
return;
- for_ifa(indev) {
+ in_dev_for_each_ifa_rtnl(ifa, indev) {
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter,
@@ -4138,7 +4139,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
default:
break;
}
- } endfor_ifa(indev);
+ }
in_dev_put(indev);
}
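in_dev_for_each_ifa_rtnl() is the open-coded-loop replacement for the removed for_ifa()/endfor_ifa() pair; it iterates the in_device's address list and expects the caller to hold the RTNL (an _rcu variant exists for RCU readers). A minimal sketch of the iteration, with the loop body illustrative:

    #include <linux/inetdevice.h>

    /* Sketch: log each IPv4 address on @indev; caller holds the RTNL. */
    static void example_walk_addrs(struct in_device *indev)
    {
    	const struct in_ifaddr *ifa;

    	in_dev_for_each_ifa_rtnl(ifa, indev)
    		pr_info("addr %pI4 mask %pI4\n",
    			&ifa->ifa_address, &ifa->ifa_mask);
    }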
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 884f1f52dcc2..991d7e285736 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -12,6 +12,7 @@
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
+#include <linux/if_rmnet.h>
struct rmnet_map_control_command {
u8 command_name;
@@ -39,30 +40,6 @@ enum rmnet_map_commands {
RMNET_MAP_COMMAND_ENUM_LENGTH
};
-struct rmnet_map_header {
- u8 pad_len:6;
- u8 reserved_bit:1;
- u8 cd_bit:1;
- u8 mux_id;
- __be16 pkt_len;
-} __aligned(1);
-
-struct rmnet_map_dl_csum_trailer {
- u8 reserved1;
- u8 valid:1;
- u8 reserved2:7;
- u16 csum_start_offset;
- u16 csum_length;
- __be16 csum_value;
-} __aligned(1);
-
-struct rmnet_map_ul_csum_header {
- __be16 csum_start_offset;
- u16 csum_insert_offset:14;
- u16 udp_ip4_ind:1;
- u16 csum_enabled:1;
-} __aligned(1);
-
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 33be8c5ad0c9..d5304bad2372 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
+r8169-objs += r8169_main.o r8169_firmware.o
obj-$(CONFIG_R8169) += r8169.o
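With the driver now split across two source files, this uses kbuild's composite-object convention: listing r8169_main.o and r8169_firmware.o under r8169-objs makes kbuild link both into a single r8169.o, so the module name seen by the obj-$(CONFIG_R8169) line (and the resulting r8169.ko) is unchanged.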
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
new file mode 100644
index 000000000000..8f54a2c832eb
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include "r8169_firmware.h"
+
+enum rtl_fw_opcode {
+ PHY_READ = 0x0,
+ PHY_DATA_OR = 0x1,
+ PHY_DATA_AND = 0x2,
+ PHY_BJMPN = 0x3,
+ PHY_MDIO_CHG = 0x4,
+ PHY_CLEAR_READCOUNT = 0x7,
+ PHY_WRITE = 0x8,
+ PHY_READCOUNT_EQ_SKIP = 0x9,
+ PHY_COMP_EQ_SKIPN = 0xa,
+ PHY_COMP_NEQ_SKIPN = 0xb,
+ PHY_WRITE_PREVIOUS = 0xc,
+ PHY_SKIPN = 0xd,
+ PHY_DELAY_MS = 0xe,
+};
+
+struct fw_info {
+ u32 magic;
+ char version[RTL_VER_SIZE];
+ __le32 fw_start;
+ __le32 fw_len;
+ u8 chksum;
+} __packed;
+
+#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_info *fw_info = (struct fw_info *)fw->data;
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+
+ if (fw->size < FW_OPCODE_SIZE)
+ return false;
+
+ if (!fw_info->magic) {
+ size_t i, size, start;
+ u8 checksum = 0;
+
+ if (fw->size < sizeof(*fw_info))
+ return false;
+
+ for (i = 0; i < fw->size; i++)
+ checksum += fw->data[i];
+ if (checksum != 0)
+ return false;
+
+ start = le32_to_cpu(fw_info->fw_start);
+ if (start > fw->size)
+ return false;
+
+ size = le32_to_cpu(fw_info->fw_len);
+ if (size > (fw->size - start) / FW_OPCODE_SIZE)
+ return false;
+
+ strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)(fw->data + start);
+ pa->size = size;
+ } else {
+ if (fw->size % FW_OPCODE_SIZE)
+ return false;
+
+ strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)fw->data;
+ pa->size = fw->size / FW_OPCODE_SIZE;
+ }
+
+ return true;
+}
+
+static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ switch (action >> 28) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_MDIO_CHG:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index)
+ goto out;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= pa->size)
+ goto out;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= pa->size)
+ goto out;
+ break;
+
+ default:
+ dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action);
+ return false;
+ }
+ }
+
+ return true;
+out:
+ dev_err(rtl_fw->dev, "Out of range of firmware\n");
+ return false;
+}
+
+void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ rtl_fw_write_t fw_write = rtl_fw->phy_write;
+ rtl_fw_read_t fw_read = rtl_fw->phy_read;
+ int predata = 0, count = 0;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 data = action & 0x0000ffff;
+ u32 regno = (action & 0x0fff0000) >> 16;
+ enum rtl_fw_opcode opcode = action >> 28;
+
+ if (!action)
+ break;
+
+ switch (opcode) {
+ case PHY_READ:
+ predata = fw_read(tp, regno);
+ count++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ break;
+ case PHY_BJMPN:
+ index -= (regno + 1);
+ break;
+ case PHY_MDIO_CHG:
+ if (data == 0) {
+ fw_write = rtl_fw->phy_write;
+ fw_read = rtl_fw->phy_read;
+ } else if (data == 1) {
+ fw_write = rtl_fw->mac_mcu_write;
+ fw_read = rtl_fw->mac_mcu_read;
+ }
+
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ break;
+ case PHY_WRITE:
+ fw_write(tp, regno, data);
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (count == data)
+ index++;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ fw_write(tp, regno, predata);
+ break;
+ case PHY_SKIPN:
+ index += regno;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ break;
+ }
+ }
+}
+
+void rtl_fw_release_firmware(struct rtl_fw *rtl_fw)
+{
+ release_firmware(rtl_fw->fw);
+}
+
+int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
+{
+ int rc;
+
+ rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
+ if (rc < 0)
+ goto out;
+
+ if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) {
+ release_firmware(rtl_fw->fw);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
+ rtl_fw->fw_name, rc);
+ return rc;
+}
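As the interpreter in rtl_fw_write_firmware() above shows, every 32-bit firmware action word packs an opcode in the top nibble, a register number (or skip/jump count) in bits 27:16, and immediate data in the low 16 bits. A sketch of the decoding, mirroring the shifts and masks used above (the struct and function names are illustrative):

    #include <linux/types.h>

    struct rtl_fw_action {
    	u8 opcode;	/* action >> 28, an enum rtl_fw_opcode value */
    	u16 regno;	/* (action & 0x0fff0000) >> 16, 12 bits wide */
    	u16 data;	/* action & 0x0000ffff */
    };

    static struct rtl_fw_action example_decode(u32 action)
    {
    	return (struct rtl_fw_action) {
    		.opcode = action >> 28,
    		.regno = (action & 0x0fff0000) >> 16,
    		.data = action & 0x0000ffff,
    	};
    }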
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.h b/drivers/net/ethernet/realtek/r8169_firmware.h
new file mode 100644
index 000000000000..7dc348ed8345
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_firmware.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+
+struct rtl8169_private;
+typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val);
+typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg);
+
+#define RTL_VER_SIZE 32
+
+struct rtl_fw {
+ rtl_fw_write_t phy_write;
+ rtl_fw_read_t phy_read;
+ rtl_fw_write_t mac_mcu_write;
+ rtl_fw_read_t mac_mcu_read;
+ const struct firmware *fw;
+ const char *fw_name;
+ struct device *dev;
+
+ char version[RTL_VER_SIZE];
+
+ struct rtl_fw_phy_action {
+ __le32 *code;
+ size_t size;
+ } phy_action;
+};
+
+int rtl_fw_request_firmware(struct rtl_fw *rtl_fw);
+void rtl_fw_release_firmware(struct rtl_fw *rtl_fw);
+void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw);
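The header only forward-declares struct rtl8169_private and works purely through the rtl_fw_write_t/rtl_fw_read_t function pointers, so the firmware code needs no knowledge of the MAC driver's internals; the PHY_MDIO_CHG opcode simply swaps which pair of callbacks (PHY versus MAC MCU) is active while the action stream runs.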
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169_main.c
index d06a61f00e78..8b7d45ff1d03 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -27,12 +27,13 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
-#include <linux/firmware.h>
#include <linux/prefetch.h>
#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include "r8169_firmware.h"
+
#define MODULENAME "r8169"
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
@@ -81,7 +82,7 @@ static const int multicast_filter_limit = 32;
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
enum mac_version {
- RTL_GIGA_MAC_VER_01 = 0,
+ /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
RTL_GIGA_MAC_VER_02,
RTL_GIGA_MAC_VER_03,
RTL_GIGA_MAC_VER_04,
@@ -132,7 +133,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
- RTL_GIGA_MAC_NONE = 0xff,
+ RTL_GIGA_MAC_NONE
};
#define JUMBO_1K ETH_DATA_LEN
@@ -146,7 +147,6 @@ static const struct {
const char *fw_name;
} rtl_chip_infos[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] = {"RTL8169" },
[RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
[RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
@@ -406,8 +406,6 @@ enum rtl_register_content {
RxOK = 0x0001,
/* RxStatusDesc */
- RxBOVF = (1 << 24),
- RxFOVF = (1 << 23),
RxRWT = (1 << 22),
RxRES = (1 << 21),
RxRUNT = (1 << 20),
@@ -503,9 +501,6 @@ enum rtl_register_content {
LinkStatus = 0x02,
FullDup = 0x01,
- /* _TBICSRBit */
- TBILinkOK = 0x02000000,
-
/* ResetCounterCommand */
CounterReset = 0x1,
@@ -639,7 +634,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
u32 msg_enable;
- u16 mac_version;
+ enum mac_version mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -657,18 +652,7 @@ struct rtl8169_private {
const struct rtl_coalesce_info *coalesce_info;
struct clk *clk;
- struct mdio_ops {
- void (*write)(struct rtl8169_private *, int, int);
- int (*read)(struct rtl8169_private *, int);
- } mdio_ops;
-
- struct jumbo_ops {
- void (*enable)(struct rtl8169_private *);
- void (*disable)(struct rtl8169_private *);
- } jumbo_ops;
-
void (*hw_start)(struct rtl8169_private *tp);
- bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct {
DECLARE_BITMAP(flags, RTL_FLAG_MAX);
@@ -684,18 +668,7 @@ struct rtl8169_private {
u32 saved_wolopts;
const char *fw_name;
- struct rtl_fw {
- const struct firmware *fw;
-
-#define RTL_VER_SIZE 32
-
- char version[RTL_VER_SIZE];
-
- struct rtl_fw_phy_action {
- __le32 *code;
- size_t size;
- } phy_action;
- } *rtl_fw;
+ struct rtl_fw *rtl_fw;
u32 ocp_base;
};
@@ -1015,14 +988,38 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
return value;
}
-static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
+static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
{
- tp->mdio_ops.write(tp, location, val);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ r8168dp_1_mdio_write(tp, location, val);
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ r8168dp_2_mdio_write(tp, location, val);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ r8168g_mdio_write(tp, location, val);
+ break;
+ default:
+ r8169_mdio_write(tp, location, val);
+ break;
+ }
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp, location);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ return r8168dp_1_mdio_read(tp, location);
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ return r8168dp_2_mdio_read(tp, location);
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ return r8168g_mdio_read(tp, location);
+ default:
+ return r8169_mdio_read(tp, location);
+ }
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1424,7 +1421,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
@@ -1954,9 +1951,7 @@ static int rtl_get_eee_supp(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5c);
- ret = phy_read(phydev, 0x12);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5c, 0x12);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -1979,9 +1974,7 @@ static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- ret = phy_read(phydev, 0x11);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5d, 0x11);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -2004,9 +1997,7 @@ static int rtl_get_eee_adv(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- ret = phy_read(phydev, 0x10);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5d, 0x10);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -2029,9 +2020,7 @@ static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- phy_write(phydev, 0x10, val);
- phy_write(phydev, 0x1f, 0x0000);
+ phy_write_paged(phydev, 0x0a5d, 0x10, val);
break;
default:
ret = -EPROTONOSUPPORT;
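These conversions lean on phylib's paged accessors, which fold the select-page/access/restore-page dance behind the MDIO bus lock instead of three bare writes to page register 0x1f. Roughly, and assuming phylib's usual structure, phy_read_paged() behaves like this sketch:

    #include <linux/phy.h>

    /* Sketch of phy_read_paged(phydev, page, regnum): select the page,
     * read the register, restore the original page. phy_select_page()
     * takes the MDIO bus lock and phy_restore_page() releases it, so no
     * other accessor observes the temporary page.
     */
    static int example_read_paged(struct phy_device *phydev, int page, u32 regnum)
    {
    	int oldpage, ret;

    	oldpage = phy_select_page(phydev, page);
    	ret = oldpage < 0 ? oldpage : __phy_read(phydev, regnum);
    	return phy_restore_page(phydev, oldpage, ret);
    }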
@@ -2252,7 +2241,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
{ 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
{ 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
- { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 },
/* Catch-all */
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
@@ -2292,246 +2280,10 @@ static void __rtl_writephy_batch(struct rtl8169_private *tp,
#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
-#define PHY_READ 0x00000000
-#define PHY_DATA_OR 0x10000000
-#define PHY_DATA_AND 0x20000000
-#define PHY_BJMPN 0x30000000
-#define PHY_MDIO_CHG 0x40000000
-#define PHY_CLEAR_READCOUNT 0x70000000
-#define PHY_WRITE 0x80000000
-#define PHY_READCOUNT_EQ_SKIP 0x90000000
-#define PHY_COMP_EQ_SKIPN 0xa0000000
-#define PHY_COMP_NEQ_SKIPN 0xb0000000
-#define PHY_WRITE_PREVIOUS 0xc0000000
-#define PHY_SKIPN 0xd0000000
-#define PHY_DELAY_MS 0xe0000000
-
-struct fw_info {
- u32 magic;
- char version[RTL_VER_SIZE];
- __le32 fw_start;
- __le32 fw_len;
- u8 chksum;
-} __packed;
-
-#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
-
-static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- const struct firmware *fw = rtl_fw->fw;
- struct fw_info *fw_info = (struct fw_info *)fw->data;
- struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
- char *version = rtl_fw->version;
- bool rc = false;
-
- if (fw->size < FW_OPCODE_SIZE)
- goto out;
-
- if (!fw_info->magic) {
- size_t i, size, start;
- u8 checksum = 0;
-
- if (fw->size < sizeof(*fw_info))
- goto out;
-
- for (i = 0; i < fw->size; i++)
- checksum += fw->data[i];
- if (checksum != 0)
- goto out;
-
- start = le32_to_cpu(fw_info->fw_start);
- if (start > fw->size)
- goto out;
-
- size = le32_to_cpu(fw_info->fw_len);
- if (size > (fw->size - start) / FW_OPCODE_SIZE)
- goto out;
-
- memcpy(version, fw_info->version, RTL_VER_SIZE);
-
- pa->code = (__le32 *)(fw->data + start);
- pa->size = size;
- } else {
- if (fw->size % FW_OPCODE_SIZE)
- goto out;
-
- strlcpy(version, tp->fw_name, RTL_VER_SIZE);
-
- pa->code = (__le32 *)fw->data;
- pa->size = fw->size / FW_OPCODE_SIZE;
- }
- version[RTL_VER_SIZE - 1] = 0;
-
- rc = true;
-out:
- return rc;
-}
-
-static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
- struct rtl_fw_phy_action *pa)
-{
- bool rc = false;
- size_t index;
-
- for (index = 0; index < pa->size; index++) {
- u32 action = le32_to_cpu(pa->code[index]);
- u32 regno = (action & 0x0fff0000) >> 16;
-
- switch(action & 0xf0000000) {
- case PHY_READ:
- case PHY_DATA_OR:
- case PHY_DATA_AND:
- case PHY_MDIO_CHG:
- case PHY_CLEAR_READCOUNT:
- case PHY_WRITE:
- case PHY_WRITE_PREVIOUS:
- case PHY_DELAY_MS:
- break;
-
- case PHY_BJMPN:
- if (regno > index) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
- case PHY_READCOUNT_EQ_SKIP:
- if (index + 2 >= pa->size) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
- case PHY_COMP_EQ_SKIPN:
- case PHY_COMP_NEQ_SKIPN:
- case PHY_SKIPN:
- if (index + 1 + regno >= pa->size) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
-
- default:
- netif_err(tp, ifup, tp->dev,
- "Invalid action 0x%08x\n", action);
- goto out;
- }
- }
- rc = true;
-out:
- return rc;
-}
-
-static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- struct net_device *dev = tp->dev;
- int rc = -EINVAL;
-
- if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firmware\n");
- goto out;
- }
-
- if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
- rc = 0;
-out:
- return rc;
-}
-
-static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
- struct mdio_ops org, *ops = &tp->mdio_ops;
- u32 predata, count;
- size_t index;
-
- predata = count = 0;
- org.write = ops->write;
- org.read = ops->read;
-
- for (index = 0; index < pa->size; ) {
- u32 action = le32_to_cpu(pa->code[index]);
- u32 data = action & 0x0000ffff;
- u32 regno = (action & 0x0fff0000) >> 16;
-
- if (!action)
- break;
-
- switch(action & 0xf0000000) {
- case PHY_READ:
- predata = rtl_readphy(tp, regno);
- count++;
- index++;
- break;
- case PHY_DATA_OR:
- predata |= data;
- index++;
- break;
- case PHY_DATA_AND:
- predata &= data;
- index++;
- break;
- case PHY_BJMPN:
- index -= regno;
- break;
- case PHY_MDIO_CHG:
- if (data == 0) {
- ops->write = org.write;
- ops->read = org.read;
- } else if (data == 1) {
- ops->write = mac_mcu_write;
- ops->read = mac_mcu_read;
- }
-
- index++;
- break;
- case PHY_CLEAR_READCOUNT:
- count = 0;
- index++;
- break;
- case PHY_WRITE:
- rtl_writephy(tp, regno, data);
- index++;
- break;
- case PHY_READCOUNT_EQ_SKIP:
- index += (count == data) ? 2 : 1;
- break;
- case PHY_COMP_EQ_SKIPN:
- if (predata == data)
- index += regno;
- index++;
- break;
- case PHY_COMP_NEQ_SKIPN:
- if (predata != data)
- index += regno;
- index++;
- break;
- case PHY_WRITE_PREVIOUS:
- rtl_writephy(tp, regno, predata);
- index++;
- break;
- case PHY_SKIPN:
- index += regno + 1;
- break;
- case PHY_DELAY_MS:
- mdelay(data);
- index++;
- break;
-
- default:
- BUG();
- }
- }
-
- ops->write = org.write;
- ops->read = org.read;
-}
-
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
- release_firmware(tp->rtl_fw->fw);
+ rtl_fw_release_firmware(tp->rtl_fw);
kfree(tp->rtl_fw);
tp->rtl_fw = NULL;
}
@@ -2539,9 +2291,9 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
if (tp->rtl_fw)
- rtl_phy_write_fw(tp, tp->rtl_fw);
+ rtl_fw_write_firmware(tp, tp->rtl_fw);
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2578,9 +2330,7 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
{
- phy_write(tp->phydev, 0x1f, 0x0a43);
- phy_set_bits(tp->phydev, 0x11, BIT(4));
- phy_write(tp->phydev, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
@@ -2910,50 +2660,59 @@ static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
rtl8168c_3_hw_phy_config(tp);
}
-static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
+static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
+ /*
+	 * Cannot link to 1Gbps with a bad cable
+	 * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
- };
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+};
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+};
- rtl_writephy_batch(tp, phy_reg_init_0);
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
/*
* Rx Error Issue
@@ -2964,17 +2723,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
- };
int val;
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
val = rtl_readphy(tp, 0x0d);
@@ -3023,62 +2774,12 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
-
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
-
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init_0);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
-
- { 0x1f, 0x0002 }
- };
int val;
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
@@ -3528,20 +3229,15 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
{
- phy_write(tp->phydev, 0x1f, 0x0a43);
- phy_clear_bits(tp->phydev, 0x10, BIT(2));
+ phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
}
static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- phy_write(phydev, 0x1f, 0x0bcc);
- phy_clear_bits(phydev, 0x14, BIT(8));
-
- phy_write(phydev, 0x1f, 0x0a44);
- phy_set_bits(phydev, 0x11, BIT(7) | BIT(6));
-
+ phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x8084);
phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
@@ -3552,43 +3248,36 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
+ int ret;
+
rtl_apply_firmware(tp);
- rtl_writephy(tp, 0x1f, 0x0a46);
- if (rtl_readphy(tp, 0x10) & 0x0100) {
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000);
- } else {
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000);
- }
+ ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
+ if (ret & BIT(8))
+ phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
+ else
+ phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
- rtl_writephy(tp, 0x1f, 0x0a46);
- if (rtl_readphy(tp, 0x13) & 0x0100) {
- rtl_writephy(tp, 0x1f, 0x0c41);
- rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000);
- } else {
- rtl_writephy(tp, 0x1f, 0x0c41);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002);
- }
+ ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
+ if (ret & BIT(8))
+		phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
+	else
+		phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
/* Enable PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* EEE auto-fallback function */
- rtl_writephy(tp, 0x1f, 0x0a4b);
- rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
rtl_writephy(tp, 0x13, 0x8012);
rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Improve SWR Efficiency */
rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3600,6 +3289,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x1065);
rtl_writephy(tp, 0x14, 0x9065);
rtl_writephy(tp, 0x14, 0x1065);
+ rtl_writephy(tp, 0x1f, 0x0000);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3684,14 +3374,10 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* enable GPHY 10M */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
/* SAR ADC performance */
- rtl_writephy(tp, 0x1f, 0x0bca);
- rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
rtl_writephy(tp, 0x1f, 0x0a43);
rtl_writephy(tp, 0x13, 0x803f);
@@ -3711,9 +3397,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* disable phy pfm mode */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3743,9 +3427,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* enable GPHY 10M */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
data = r8168_mac_ocp_read(tp, 0xdd02);
@@ -3781,9 +3463,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* disable phy pfm mode */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3793,16 +3473,12 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
/* Enable PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
- rtl_writephy(tp, 0x1f, 0x0a4b);
- rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3811,9 +3487,7 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* set rg_sel_sdm_rate */
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3831,9 +3505,7 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* Set rg_sel_sdm_rate */
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Channel estimation parameters */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3985,7 +3657,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
{
static const rtl_generic_fct phy_configs[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] = NULL,
[RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
[RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
[RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
@@ -4050,12 +3721,6 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static bool rtl_tbi_enabled(struct rtl8169_private *tp)
-{
- return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
-}
-
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
rtl_hw_phy_config(dev);
@@ -4124,31 +3789,6 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(tp->phydev, ifr, cmd);
}
-static void rtl_init_mdio_ops(struct rtl8169_private *tp)
-{
- struct mdio_ops *ops = &tp->mdio_ops;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- ops->write = r8168dp_1_mdio_write;
- ops->read = r8168dp_1_mdio_read;
- break;
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- ops->write = r8168dp_2_mdio_write;
- ops->read = r8168dp_2_mdio_read;
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ops->write = r8168g_mdio_write;
- ops->read = r8168g_mdio_read;
- break;
- default:
- ops->write = r8169_mdio_write;
- ops->read = r8169_mdio_read;
- break;
- }
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4168,7 +3808,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
}
}
-static void r8168_pll_power_down(struct rtl8169_private *tp)
+static void rtl_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
return;
@@ -4203,10 +3843,12 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
+ default:
+ break;
}
}
-static void r8168_pll_power_up(struct rtl8169_private *tp)
+static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
@@ -4230,6 +3872,8 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
break;
+ default:
+ break;
}
phy_resume(tp->phydev);
@@ -4237,32 +3881,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
msleep(20);
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
- break;
- default:
- r8168_pll_power_down(tp);
- }
-}
-
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
- break;
- default:
- r8168_pll_power_up(tp);
- }
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
@@ -4285,24 +3907,6 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- if (tp->jumbo_ops.enable) {
- rtl_unlock_config_regs(tp);
- tp->jumbo_ops.enable(tp);
- rtl_lock_config_regs(tp);
- }
-}
-
-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- if (tp->jumbo_ops.disable) {
- rtl_unlock_config_regs(tp);
- tp->jumbo_ops.disable(tp);
- rtl_lock_config_regs(tp);
- }
-}
-
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
@@ -4369,55 +3973,56 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
-static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
+static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
- struct jumbo_ops *ops = &tp->jumbo_ops;
-
+ rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
- ops->disable = r8168b_0_hw_jumbo_disable;
- ops->enable = r8168b_0_hw_jumbo_enable;
+ r8168b_0_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
- ops->disable = r8168b_1_hw_jumbo_disable;
- ops->enable = r8168b_1_hw_jumbo_enable;
+ r8168b_1_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- ops->disable = r8168c_hw_jumbo_disable;
- ops->enable = r8168c_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ r8168c_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- ops->disable = r8168dp_hw_jumbo_disable;
- ops->enable = r8168dp_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ r8168dp_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- ops->disable = r8168e_hw_jumbo_disable;
- ops->enable = r8168e_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ r8168e_hw_jumbo_enable(tp);
+ break;
+ default:
break;
+ }
+ rtl_lock_config_regs(tp);
+}
- /*
- * No action needed for jumbo frames with 8169.
- * No jumbo for 810x at all.
- */
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_unlock_config_regs(tp);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ r8168b_0_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ r8168b_1_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ r8168c_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ r8168dp_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ r8168e_hw_jumbo_disable(tp);
+ break;
default:
- ops->disable = NULL;
- ops->enable = NULL;
break;
}
+ rtl_lock_config_regs(tp);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4435,35 +4040,28 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
static void rtl_request_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
- int rc = -ENOMEM;
/* firmware loaded already or no firmware available */
if (tp->rtl_fw || !tp->fw_name)
return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw)
- goto err_warn;
-
- rc = request_firmware(&rtl_fw->fw, tp->fw_name, tp_to_dev(tp));
- if (rc < 0)
- goto err_free;
-
- rc = rtl_check_firmware(tp, rtl_fw);
- if (rc < 0)
- goto err_release_firmware;
-
- tp->rtl_fw = rtl_fw;
+ if (!rtl_fw) {
+ netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
+ return;
+ }
- return;
+ rtl_fw->phy_write = rtl_writephy;
+ rtl_fw->phy_read = rtl_readphy;
+ rtl_fw->mac_mcu_write = mac_mcu_write;
+ rtl_fw->mac_mcu_read = mac_mcu_read;
+ rtl_fw->fw_name = tp->fw_name;
+ rtl_fw->dev = tp_to_dev(tp);
-err_release_firmware:
- release_firmware(rtl_fw->fw);
-err_free:
- kfree(rtl_fw);
-err_warn:
- netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
- tp->fw_name, rc);
+ if (rtl_fw_request_firmware(rtl_fw))
+ kfree(rtl_fw);
+ else
+ tp->rtl_fw = rtl_fw;
}
static void rtl_rx_close(struct rtl8169_private *tp)
@@ -5834,7 +5432,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
static void r8169_csum_workaround(struct rtl8169_private *tp,
struct sk_buff *skb)
{
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
netdev_features_t features = tp->dev->features;
struct sk_buff *segs, *nskb;
@@ -5857,11 +5455,8 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
rtl8169_start_xmit(skb, tp->dev);
} else {
- struct net_device_stats *stats;
-
drop:
- stats = &tp->dev->stats;
- stats->tx_dropped++;
+ tp->dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -5889,8 +5484,7 @@ static int msdn_giant_send_check(struct sk_buff *skb)
return ret;
}
-static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
- struct sk_buff *skb, u32 *opts)
+static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
u32 mss = skb_shinfo(skb)->gso_size;
@@ -5907,8 +5501,6 @@ static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
else
WARN_ON_ONCE(1);
}
-
- return true;
}
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
@@ -5998,6 +5590,18 @@ static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
return slots_avail > nr_frags;
}
+/* RTL8102e and chip versions from RTL8168c onwards support csum_v2 */
+static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ return false;
+ default:
+ return true;
+ }
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
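The "case LO ... HI:" labels that replace the long enumerator lists throughout this patch are the GCC/Clang case-range extension, which matches every value in the inclusive range. A self-contained toy example of the construct, unrelated to the driver:

/* Toy illustration of the case-range extension. */
static const char *digit_class(int v)
{
	switch (v) {
	case 0 ... 9:
		return "single digit";
	case 10 ... 99:
		return "double digit";
	default:
		return "other";
	}
}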
@@ -6017,12 +5621,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+ opts[1] = rtl8169_tx_vlan_tag(skb);
opts[0] = DescOwn;
- if (!tp->tso_csum(tp, skb, opts)) {
- r8169_csum_workaround(tp, skb);
- return NETDEV_TX_OK;
+ if (rtl_chip_supports_csum_v2(tp)) {
+ if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
+ r8169_csum_workaround(tp, skb);
+ return NETDEV_TX_OK;
+ }
+ } else {
+ rtl8169_tso_csum_v1(skb, opts);
}
len = skb_headlen(skb);
@@ -6264,14 +5872,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dev->stats.rx_length_errors++;
if (status & RxCRC)
dev->stats.rx_crc_errors++;
- /* RxFOVF is a reserved bit on later chip versions */
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 &&
- status & RxFOVF) {
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- dev->stats.rx_fifo_errors++;
- } else if (status & (RxRUNT | RxCRC) &&
- !(status & RxRWT) &&
- dev->features & NETIF_F_RXALL) {
+ if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
+ dev->features & NETIF_F_RXALL) {
goto process_pkt;
}
} else {
@@ -6451,7 +6053,10 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (ret)
return ret;
- if (!tp->supports_gmii)
+ if (tp->supports_gmii)
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ else
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
@@ -7046,42 +6651,23 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
data |= (1 << 15);
r8168_mac_ocp_write(tp, 0xe8de, data);
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
-}
-
-static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
-{
- rtl8168ep_stop_cmac(tp);
- rtl_hw_init_8168g(tp);
+ rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
+ rtl8168ep_stop_cmac(tp);
+ /* fall through */
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
- rtl_hw_init_8168ep(tp);
- break;
default:
break;
}
}
-/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
-static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
- return false;
- default:
- return true;
- }
-}
-
static int rtl_jumbo_max(struct rtl8169_private *tp)
{
/* Non-GBit versions don't support jumbo frames */
@@ -7090,7 +6676,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
switch (tp->mac_version) {
/* RTL8169 */
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
case RTL_GIGA_MAC_VER_11:
@@ -7204,11 +6790,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (tp->mac_version == RTL_GIGA_MAC_NONE)
return -ENODEV;
- if (rtl_tbi_enabled(tp)) {
- dev_err(&pdev->dev, "TBI fiber mode not supported\n");
- return -ENODEV;
- }
-
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
@@ -7232,9 +6813,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- rtl_init_mdio_ops(tp);
- rtl_init_jumbo_ops(tp);
-
chipset = tp->mac_version;
rc = rtl_alloc_irq(tp);
@@ -7285,12 +6863,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (rtl_chip_supports_csum_v2(tp)) {
- tp->tso_csum = rtl8169_tso_csum_v2;
+ if (rtl_chip_supports_csum_v2(tp))
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- } else {
- tp->tso_csum = rtl8169_tso_csum_v1;
- }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3e5bc1fc3c46..079f459c73a5 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2210,6 +2210,10 @@ static int rocker_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
}
memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bdfa6a19d620..7072b249c8bd 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -18,6 +18,7 @@
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
+#include <net/nexthop.h>
#include <net/arp.h>
#include "rocker.h"
@@ -2282,8 +2283,8 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
/* XXX support ECMP */
- nh = fi->fib_nh;
- nh_on_port = (fi->fib_dev == ofdpa_port->dev);
+ nh = fib_info_nh(fi, 0);
+ nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
has_gw = !!nh->fib_nh_gw4;
if (has_gw && nh_on_port) {
@@ -2733,11 +2734,13 @@ static int ofdpa_fib4_add(struct rocker *rocker,
{
struct ofdpa *ofdpa = rocker->wpriv;
struct ofdpa_port *ofdpa_port;
+ struct fib_nh *nh;
int err;
if (ofdpa->fib_aborted)
return 0;
- ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ nh = fib_info_nh(fen_info->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
return 0;
err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
@@ -2745,7 +2748,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
fen_info->tb_id, 0);
if (err)
return err;
- fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return 0;
}
@@ -2754,13 +2757,15 @@ static int ofdpa_fib4_del(struct rocker *rocker,
{
struct ofdpa *ofdpa = rocker->wpriv;
struct ofdpa_port *ofdpa_port;
+ struct fib_nh *nh;
if (ofdpa->fib_aborted)
return 0;
- ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ nh = fib_info_nh(fen_info->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
return 0;
- fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
@@ -2780,14 +2785,16 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
+ struct fib_nh *nh;
+
if (flow_entry->key.tbl_id !=
ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
continue;
- ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
- rocker);
+ nh = fib_info_nh(flow_entry->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
continue;
- flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
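All of the rocker conversions above funnel through the new fib_info_nh() accessor instead of dereferencing fi->fib_nh directly, which is required once routes can carry separate nexthop objects. A minimal sketch of the accessor pattern, assuming a hypothetical helper and a single-path route (index 0, matching the XXX ECMP note above):

/* Hypothetical helper illustrating the fib_info_nh() accessor pattern. */
#include <net/ip_fib.h>
#include <net/nexthop.h>

static struct net_device *example_route_dev(struct fib_info *fi)
{
	struct fib_nh *nh = fib_info_nh(fi, 0);	/* first (only) path */

	return nh->fib_nh_dev;
}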
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 06545d7399fc..0b5c8d74c683 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,6 +13,16 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_SELFTESTS
+ bool "Support for STMMAC Selftests"
+ depends on INET
+ depends on STMMAC_ETH
+ default n
+ ---help---
+ This adds support for STMMAC Selftests using ethtool. Enable this
+ feature if you are facing problems with your HW and submit the test
+ results to the netdev Mailing List.
+
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
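Once built in, these selftests hook into ethtool's standard self-test interface, so they can be run with e.g. `ethtool -t eth0 offline` (the interface name is illustrative), and the per-test results are printed by ethtool rather than the kernel log.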
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c529c21e9bdd..c59926d96bcc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -8,6 +8,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
$(stmmac-y)
+stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
+
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 272b9ca66314..fdd4c042544d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -261,7 +261,7 @@ struct stmmac_safety_stats {
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 25
+#define STMMAC_TX_FRAMES 1
/* Packets types */
enum packets_types {
@@ -424,6 +424,7 @@ struct mac_device_info {
const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
+ const struct stmmac_mmc_ops *mmc;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 126b66bb73a6..79f2ee37afed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -298,6 +299,9 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
return 0;
}
@@ -307,6 +311,9 @@ static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
const struct mediatek_dwmac_variant *variant = plat->variant;
clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
}
static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -349,6 +356,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->has_gmac = 0;
plat_dat->pmt = 0;
+ plat_dat->riwt_off = 1;
plat_dat->maxmtu = ETH_DATA_LEN;
plat_dat->bsp_priv = priv_plat;
plat_dat->init = mediatek_dwmac_init;
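The runtime-PM calls added above are deliberately symmetric between init and exit. A minimal sketch of the pairing, with hypothetical function names:

#include <linux/pm_runtime.h>

/* Hypothetical pair showing the enable/get vs. put/disable symmetry. */
static void example_init(struct device *dev)
{
	pm_runtime_enable(dev);		/* raise the enable depth */
	pm_runtime_get_sync(dev);	/* take a usage reference, resume now */
}

static void example_exit(struct device *dev)
{
	pm_runtime_put_sync(dev);	/* drop the reference, allow suspend */
	pm_runtime_disable(dev);	/* balance pm_runtime_enable() */
}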
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 7fdd1760a74c..5ae474ebaaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson6 and Meson8 DWMAC glue layer
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index c5979569fd60..c06295ec1ef0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson8b, Meson8m2 and GXBB DWMAC glue layer
*
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index d466e33635b0..d939f7b99b94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -38,9 +38,12 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000100
#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+#define SYSMGR_FPGAINTF_EMAC_REG 0x00000070
+#define SYSMGR_FPGAINTF_EMAC_BIT 0x1
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
@@ -48,6 +51,11 @@
#define EMAC_SPLITTER_CTRL_SPEED_100 0x3
#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0
+struct socfpga_dwmac;
+struct socfpga_dwmac_ops {
+ int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv);
+};
+
struct socfpga_dwmac {
int interface;
u32 reg_offset;
@@ -59,6 +67,7 @@ struct socfpga_dwmac {
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
struct tse_pcs pcs;
+ const struct socfpga_dwmac_ops *ops;
};
static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
@@ -233,25 +242,36 @@ err_node_put:
return ret;
}
-static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
+static int socfpga_set_phy_mode_common(int phymode, u32 *val)
{
- struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
- int phymode = dwmac->interface;
- u32 reg_offset = dwmac->reg_offset;
- u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val, module;
-
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
- val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
- val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII;
break;
default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val)) {
dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
return -EINVAL;
}
@@ -302,6 +322,62 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
return 0;
}
+static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val))
+ return -EINVAL;
+
+ /* Overwrite val to GMII if the splitter core is enabled. The phymode here
+ * is the actual phy mode on the phy hardware, but the phy interface from
+ * the EMAC core is GMII.
+ */
+ if (dwmac->splitter_base)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
+ /* Assert reset to the enet controller before changing the phy mode */
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK);
+ ctrl |= val;
+
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
+ ctrl |= SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ &module);
+ module |= (SYSMGR_FPGAINTF_EMAC_BIT << reg_shift);
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ module);
+ } else {
+ ctrl &= ~SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ }
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
+ /* Deassert reset so the phy configuration is sampled by
+ * the enet controller and operation starts in the requested mode
+ */
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -311,6 +387,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct socfpga_dwmac *dwmac;
struct net_device *ndev;
struct stmmac_priv *stpriv;
+ const struct socfpga_dwmac_ops *ops;
+
+ ops = device_get_match_data(&pdev->dev);
+ if (!ops) {
+ dev_err(&pdev->dev, "no OF match data provided\n");
+ return -EINVAL;
+ }
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -341,6 +424,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->ops = ops;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
@@ -357,7 +441,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
*/
dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
- ret = socfpga_dwmac_set_phy_mode(dwmac);
+ ret = ops->set_phy_mode(dwmac);
if (ret)
goto err_dvr_remove;
@@ -376,8 +460,9 @@ static int socfpga_dwmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
- socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+ dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
/* Before the enet controller is suspended, the phy is suspended.
* This causes the phy clock to be gated. The enet controller is
@@ -404,8 +489,17 @@ static int socfpga_dwmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
socfpga_dwmac_resume);
+static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
+ .set_phy_mode = socfpga_gen5_set_phy_mode,
+};
+
+static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
+ .set_phy_mode = socfpga_gen10_set_phy_mode,
+};
+
static const struct of_device_id socfpga_dwmac_match[] = {
- { .compatible = "altr,socfpga-stmmac" },
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
+ { .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
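The probe path above resolves the per-generation ops once via device_get_match_data(), keyed on the matched compatible string. A condensed, self-contained sketch of the idiom with hypothetical names:

#include <linux/platform_device.h>
#include <linux/property.h>

struct example_ops {
	int (*set_phy_mode)(void *priv);
};

static int gen5_set_phy_mode(void *priv)
{
	return 0;	/* placeholder body */
}

static const struct example_ops gen5_ops = {
	.set_phy_mode = gen5_set_phy_mode,
};

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-gen5", .data = &gen5_ops },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_ops *ops = device_get_match_data(&pdev->dev);

	if (!ops)	/* compatible entry carried no .data */
		return -EINVAL;
	return ops->set_phy_mode(dev_get_drvdata(&pdev->dev));
}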
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a69c34f605b1..b15c6d5dbd38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -977,6 +977,18 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
+static void sun8i_dwmac_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + EMAC_BASIC_CTL0);
+
+ if (enable)
+ value |= EMAC_LOOPBACK;
+ else
+ value &= ~EMAC_LOOPBACK;
+
+ writel(value, ioaddr + EMAC_BASIC_CTL0);
+}
+
static const struct stmmac_ops sun8i_dwmac_ops = {
.core_init = sun8i_dwmac_core_init,
.set_mac = sun8i_dwmac_set_mac,
@@ -986,6 +998,7 @@ static const struct stmmac_ops sun8i_dwmac_ops = {
.flow_ctrl = sun8i_dwmac_flow_ctrl,
.set_umac_addr = sun8i_dwmac_set_umac_addr,
.get_umac_addr = sun8i_dwmac_get_umac_addr,
+ .set_mac_loopback = sun8i_dwmac_set_mac_loopback,
};
static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
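Each MAC core touched by this series implements the same set_mac_loopback() shape: a read-modify-write of one loopback bit in a core-specific register. The selftest code is expected to flip it through the hwif indirection added later in this patch. A minimal sketch, assuming priv->hw has already been populated:

/* Hypothetical toggle; stmmac_set_mac_loopback() is the macro added
 * to hwif.h further down in this patch.
 */
static void example_toggle_mac_loopback(struct stmmac_priv *priv, bool on)
{
	stmmac_set_mac_loopback(priv, priv->ioaddr, on);
}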
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 184ca13c8f79..56a69fb6f0b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -146,6 +146,7 @@ enum inter_frame_gap {
#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0877bde6e860..ebe41dd09bab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -172,7 +172,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
memset(mc_filter, 0, sizeof(mc_filter));
if (dev->flags & IFF_PROMISC) {
- value = GMAC_FRAME_FILTER_PR;
+ value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
} else if (dev->flags & IFF_ALLMULTI) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
} else if (!netdev_mc_empty(dev)) {
@@ -198,6 +198,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
}
}
+ value |= GMAC_FRAME_FILTER_HPF;
dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses (perfect filtering) */
@@ -216,6 +217,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg));
reg++;
}
+
+ while (reg <= perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
#ifdef FRAME_FILTER_DEBUG
@@ -499,6 +506,18 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
+static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ if (enable)
+ value |= GMAC_CONTROL_LM;
+ else
+ value &= ~GMAC_CONTROL_LM;
+
+ writel(value, ioaddr + GMAC_CONTROL);
+}
+
const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.set_mac = stmmac_set_mac,
@@ -518,6 +537,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
.pcs_rane = dwmac1000_rane,
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
+ .set_mac_loopback = dwmac1000_set_mac_loopback,
};
int dwmac1000_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index b735143987e1..d621b5189c41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -160,6 +160,18 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
return;
}
+static void dwmac100_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (enable)
+ value |= MAC_CONTROL_OM;
+ else
+ value &= ~MAC_CONTROL_OM;
+
+ writel(value, ioaddr + MAC_CONTROL);
+}
+
const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
.set_mac = stmmac_set_mac,
@@ -171,6 +183,7 @@ const struct stmmac_ops dwmac100_ops = {
.pmt = dwmac100_pmt,
.set_umac_addr = dwmac100_set_umac_addr,
.get_umac_addr = dwmac100_get_umac_addr,
+ .set_mac_loopback = dwmac100_set_mac_loopback,
};
int dwmac100_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index eb013d54025a..01c10893b7a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -64,6 +64,8 @@
#define GMAC_PACKET_FILTER_PR BIT(0)
#define GMAC_PACKET_FILTER_HMC BIT(2)
#define GMAC_PACKET_FILTER_PM BIT(4)
+#define GMAC_PACKET_FILTER_PCF BIT(7)
+#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_MAX_PERFECT_ADDRESSES 128
@@ -160,6 +162,7 @@ enum power_event {
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
#define GMAC_CONFIG_TE BIT(1)
#define GMAC_CONFIG_RE BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index b4bb5629de38..98b648b0f317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -406,7 +406,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
unsigned int value = 0;
if (dev->flags & IFF_PROMISC) {
- value = GMAC_PACKET_FILTER_PR;
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
/* Pass all multi */
@@ -440,20 +440,28 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
}
+ value |= GMAC_PACKET_FILTER_HPF;
+
/* Handle multiple unicast addresses */
if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
value |= GMAC_PACKET_FILTER_PR;
- } else if (!netdev_uc_empty(dev)) {
- int reg = 1;
+ } else {
struct netdev_hw_addr *ha;
+ int reg = 1;
netdev_for_each_uc_addr(ha, dev) {
dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
+
+ while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
writel(value, ioaddr + GMAC_PACKET_FILTER);
@@ -471,8 +479,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (fc & FLOW_RX) {
pr_debug("\tReceive Flow-Control ON\n");
flow |= GMAC_RX_FLOW_CTRL_RFE;
- writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
}
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");
@@ -480,7 +489,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
for (queue = 0; queue < tx_cnt; queue++) {
- flow |= GMAC_TX_FLOW_CTRL_TFE;
+ flow = GMAC_TX_FLOW_CTRL_TFE;
if (duplex)
flow |=
@@ -488,6 +497,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
+ } else {
+ for (queue = 0; queue < tx_cnt; queue++)
+ writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
@@ -703,6 +715,18 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
+static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_LM;
+ else
+ value &= ~GMAC_CONFIG_LM;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
@@ -732,6 +756,7 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
const struct stmmac_ops dwmac410_ops = {
@@ -763,6 +788,7 @@ const struct stmmac_ops dwmac410_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
const struct stmmac_ops dwmac510_ops = {
@@ -799,6 +825,7 @@ const struct stmmac_ops dwmac510_ops = {
.safety_feat_dump = dwmac5_safety_feat_dump,
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 545cb9c47433..99f8a391964c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -88,10 +88,6 @@ void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
-
- value = readl(ioaddr + GMAC_CONFIG);
- value &= ~GMAC_CONFIG_RE;
- writel(value, ioaddr + GMAC_CONFIG);
}
void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 085b700a4994..b8296eb41011 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -29,6 +29,7 @@
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
#define XGMAC_CONFIG_JE BIT(8)
#define XGMAC_CONFIG_WD BIT(7)
@@ -39,6 +40,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
#define XGMAC_FILTER_HMC BIT(2)
#define XGMAC_FILTER_PR BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 64b8cb88ea45..bfa7d6913fd4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -310,7 +310,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
u32 value = XGMAC_FILTER_RA;
if (dev->flags & IFF_PROMISC) {
- value |= XGMAC_FILTER_PR;
+ value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
value |= XGMAC_FILTER_PM;
@@ -321,6 +321,18 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
+static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable)
+ value |= XGMAC_CONFIG_LM;
+ else
+ value &= ~XGMAC_CONFIG_LM;
+
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -350,6 +362,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index e79037f511e1..7861a938420a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -299,10 +299,6 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
value &= ~XGMAC_RXST;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
-
- value = readl(ioaddr + XGMAC_RX_CONFIG);
- value &= ~XGMAC_CONFIG_RE;
- writel(value, ioaddr + XGMAC_RX_CONFIG);
}
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 81b966a8261b..6c61b753b55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -81,6 +81,7 @@ static const struct stmmac_hwif_entry {
const void *hwtimestamp;
const void *mode;
const void *tc;
+ const void *mmc;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -100,6 +101,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = NULL,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac100_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
@@ -117,6 +119,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = NULL,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac1000_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
@@ -134,6 +137,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -151,6 +155,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -168,6 +173,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -185,6 +191,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -202,6 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
+ .mmc = NULL,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
@@ -267,6 +275,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->ptp = mac->ptp ? : entry->hwtimestamp;
mac->mode = mac->mode ? : entry->mode;
mac->tc = mac->tc ? : entry->tc;
+ mac->mmc = mac->mmc ? : entry->mmc;
priv->hw = mac;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 5bb00234d961..2acfbc70e3c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -6,6 +6,7 @@
#define __STMMAC_HWIF_H__
#include <linux/netdevice.h>
+#include <linux/stmmac.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -324,6 +325,8 @@ struct stmmac_ops {
int (*flex_pps_config)(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
+ /* Loopback for selftests */
+ void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
};
#define stmmac_core_init(__priv, __args...) \
@@ -392,6 +395,8 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, rxp_config, __args)
#define stmmac_flex_pps_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
+#define stmmac_set_mac_loopback(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -464,6 +469,21 @@ struct stmmac_tc_ops {
#define stmmac_tc_setup_cbs(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cbs, __args)
+struct stmmac_counters;
+
+struct stmmac_mmc_ops {
+ void (*ctrl)(void __iomem *ioaddr, unsigned int mode);
+ void (*intr_all_mask)(void __iomem *ioaddr);
+ void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc);
+};
+
+#define stmmac_mmc_ctrl(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, ctrl, __args)
+#define stmmac_mmc_intr_all_mask(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args)
+#define stmmac_mmc_read(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, read, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
@@ -482,6 +502,7 @@ extern const struct stmmac_tc_ops dwmac510_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
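The new stmmac_mmc_* wrappers follow the existing do_void_callback convention: they dispatch through priv->hw->mmc and quietly no-op when a core provides no MMC ops (as for the XGMAC entry above). Roughly, a call site behaves like this sketch; the exact details live in the stmmac_do_void_callback() macro:

/* Approximate hand-expansion of
 * stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
 */
static void example_mmc_read(struct stmmac_priv *priv)
{
	if (priv->hw && priv->hw->mmc && priv->hw->mmc->read)
		priv->hw->mmc->read(priv->mmcaddr, &priv->mmc);
}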
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index c037326331f5..e2bd90a4d34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,4 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_err_octets;
};
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
-
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index e9b04c28980f..b8c598125cfe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include "hwif.h"
#include "mmc.h"
/* MAC Management Counters register offset */
@@ -128,7 +129,7 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
-void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -141,7 +142,7 @@ void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
}
/* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
@@ -153,7 +154,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
* counter after a read. So all the fields of the mmc struct
* have to be incremented.
*/
-void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
@@ -266,3 +267,9 @@ void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
}
+
+const struct stmmac_mmc_ops dwmac_mmc_ops = {
+ .ctrl = dwmac_mmc_ctrl,
+ .intr_all_mask = dwmac_mmc_intr_all_mask,
+ .read = dwmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dd95d959c1ce..a16ada8b8507 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -229,4 +229,26 @@ int stmmac_dvr_probe(struct device *device,
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
+#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data);
+int stmmac_selftest_get_count(struct stmmac_priv *priv);
+#else
+static inline void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ /* Not enabled */
+}
+static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv,
+ u8 *data)
+{
+ /* Not enabled */
+}
+static inline int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_STMMAC_SELFTESTS */
+
#endif /* __STMMAC_H__ */
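The #else stubs above keep every call site compilable when CONFIG_STMMAC_SELFTESTS is off. This is the standard conditional-stub idiom, shown here in generic form with a hypothetical CONFIG_FOO and foo_run():

/* Generic form of the idiom; CONFIG_FOO and foo_run() are placeholders. */
#if IS_ENABLED(CONFIG_FOO)
int foo_run(void);
#else
static inline int foo_run(void)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif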
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e09522c5509a..cec51ba34296 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -537,7 +537,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
if (ret) {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
- dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
@@ -589,6 +589,8 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
}
return len;
+ case ETH_SS_TEST:
+ return stmmac_selftest_get_count(priv);
default:
return -EOPNOTSUPP;
}
@@ -625,6 +627,9 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_TEST:
+ stmmac_selftest_get_strings(priv, p);
+ break;
default:
WARN_ON(1);
break;
@@ -890,6 +895,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
+ .self_test = stmmac_selftest_run,
.get_ethtool_stats = stmmac_get_ethtool_stats,
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 65e57b9f6887..268af79e2632 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2058,6 +2058,9 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];
+ if (status)
+ status |= handle_rx | handle_tx;
+
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
napi_schedule_irqoff(&ch->rx_napi);
@@ -2128,10 +2131,10 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- dwmac_mmc_intr_all_mask(priv->mmcaddr);
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->mmcaddr, mode);
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2164,8 +2167,8 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
- netdev_info(priv->dev, "device MAC address %pM\n",
- priv->dev->dev_addr);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
}
}
@@ -4241,9 +4244,8 @@ int stmmac_dvr_probe(struct device *device,
u32 queue, maxq;
int ret = 0;
- ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
- MTL_MAX_TX_QUEUES,
- MTL_MAX_RX_QUEUES);
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
if (!ndev)
return -ENOMEM;
@@ -4275,8 +4277,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
- ret = -ENOMEM;
- goto error_wq;
+ return -ENOMEM;
}
INIT_WORK(&priv->service_task, stmmac_service_task);
@@ -4432,8 +4433,6 @@ error_mdio_register:
}
error_hw_init:
destroy_workqueue(priv->wq);
-error_wq:
- free_netdev(ndev);
return ret;
}
@@ -4470,7 +4469,6 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
- free_netdev(ndev);
return 0;
}
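Switching to devm_alloc_etherdev_mqs() ties the netdev's lifetime to the struct device, which is why both free_netdev() calls disappear above: devres releases the allocation automatically when the device unbinds. A minimal sketch of the contract, with a hypothetical probe fragment:

/* Hypothetical probe fragment; no free_netdev() appears in any path
 * because devres frees ndev when 'device' unbinds.
 */
static int example_probe(struct device *device)
{
	struct net_device *ndev;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	return 0;
}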
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
new file mode 100644
index 000000000000..a97b1ea76438
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
+ * stmmac Selftests Support
+ *
+ * Author: Jose Abreu <joabreu@synopsys.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/phy.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include "stmmac.h"
+
+struct stmmachdr {
+ __be32 version;
+ __be64 magic;
+ u8 id;
+} __packed;
+
+#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
+ sizeof(struct stmmachdr))
+#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
+#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
+
+struct stmmac_packet_attrs {
+ int vlan;
+ int vlan_id_in;
+ int vlan_id_out;
+ unsigned char *src;
+ unsigned char *dst;
+ u32 ip_src;
+ u32 ip_dst;
+ int tcp;
+ int sport;
+ int dport;
+ u32 exp_hash;
+ int dont_wait;
+ int timeout;
+ int size;
+ int remove_sa;
+ u8 id;
+};
+
+static u8 stmmac_test_next_id;
+
+static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct sk_buff *skb = NULL;
+ struct udphdr *uhdr = NULL;
+ struct tcphdr *thdr = NULL;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct iphdr *ihdr;
+ int iplen, size;
+
+ size = attr->size + STMMAC_TEST_PKT_SIZE;
+ if (attr->vlan) {
+ size += 4;
+ if (attr->vlan > 1)
+ size += 4;
+ }
+
+ if (attr->tcp)
+ size += sizeof(struct tcphdr);
+ else
+ size += sizeof(struct udphdr);
+
+ skb = netdev_alloc_skb(priv->dev, size);
+ if (!skb)
+ return NULL;
+
+ prefetchw(skb->data);
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ if (attr->vlan > 1)
+ ehdr = skb_push(skb, ETH_HLEN + 8);
+ else if (attr->vlan)
+ ehdr = skb_push(skb, ETH_HLEN + 4);
+ else if (attr->remove_sa)
+ ehdr = skb_push(skb, ETH_HLEN - 6);
+ else
+ ehdr = skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+
+ skb_set_network_header(skb, skb->len);
+ ihdr = skb_put(skb, sizeof(*ihdr));
+
+ skb_set_transport_header(skb, skb->len);
+ if (attr->tcp)
+ thdr = skb_put(skb, sizeof(*thdr));
+ else
+ uhdr = skb_put(skb, sizeof(*uhdr));
+
+ if (!attr->remove_sa)
+ eth_zero_addr(ehdr->h_source);
+ eth_zero_addr(ehdr->h_dest);
+ if (attr->src && !attr->remove_sa)
+ ether_addr_copy(ehdr->h_source, attr->src);
+ if (attr->dst)
+ ether_addr_copy(ehdr->h_dest, attr->dst);
+
+ if (!attr->remove_sa) {
+ ehdr->h_proto = htons(ETH_P_IP);
+ } else {
+ __be16 *ptr = (__be16 *)ehdr;
+
+ /* HACK */
+ ptr[3] = htons(ETH_P_IP);
+ }
+
+ if (attr->vlan) {
+ __be16 *tag, *proto;
+
+ if (!attr->remove_sa) {
+ tag = (void *)ehdr + ETH_HLEN;
+ proto = (void *)ehdr + (2 * ETH_ALEN);
+ } else {
+ tag = (void *)ehdr + ETH_HLEN - 6;
+ proto = (void *)ehdr + ETH_ALEN;
+ }
+
+ proto[0] = htons(ETH_P_8021Q);
+ tag[0] = htons(attr->vlan_id_out);
+ tag[1] = htons(ETH_P_IP);
+ if (attr->vlan > 1) {
+ proto[0] = htons(ETH_P_8021AD);
+ tag[1] = htons(ETH_P_8021Q);
+ tag[2] = htons(attr->vlan_id_in);
+ tag[3] = htons(ETH_P_IP);
+ }
+ }
+
+ if (attr->tcp) {
+ thdr->source = htons(attr->sport);
+ thdr->dest = htons(attr->dport);
+ thdr->doff = sizeof(struct tcphdr) / 4;
+ thdr->check = 0;
+ } else {
+ uhdr->source = htons(attr->sport);
+ uhdr->dest = htons(attr->dport);
+ uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
+ uhdr->check = 0;
+ }
+
+ ihdr->ihl = 5;
+ ihdr->ttl = 32;
+ ihdr->version = 4;
+ if (attr->tcp)
+ ihdr->protocol = IPPROTO_TCP;
+ else
+ ihdr->protocol = IPPROTO_UDP;
+ iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
+ if (attr->tcp)
+ iplen += sizeof(*thdr);
+ else
+ iplen += sizeof(*uhdr);
+ ihdr->tot_len = htons(iplen);
+ ihdr->frag_off = 0;
+ ihdr->saddr = 0;
+ ihdr->daddr = htonl(attr->ip_dst);
+ ihdr->tos = 0;
+ ihdr->id = 0;
+ ip_send_check(ihdr);
+
+ shdr = skb_put(skb, sizeof(*shdr));
+ shdr->version = 0;
+ shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
+ attr->id = stmmac_test_next_id;
+ shdr->id = stmmac_test_next_id++;
+
+ if (attr->size)
+ skb_put(skb, attr->size);
+
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ if (attr->tcp) {
+ thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ } else {
+ udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
+ }
+
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = priv->dev;
+
+ return skb;
+}
+
+struct stmmac_test_priv {
+ struct stmmac_packet_attrs *packet;
+ struct packet_type pt;
+ struct completion comp;
+ int double_vlan;
+ int vlan_id;
+ int ok;
+};
+
+static int stmmac_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct tcphdr *thdr;
+ struct iphdr *ihdr;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (tpriv->packet->dst) {
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+ }
+ if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ goto out;
+ }
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+
+ if (tpriv->packet->tcp) {
+ if (ihdr->protocol != IPPROTO_TCP)
+ goto out;
+
+ thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (thdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
+ } else {
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ }
+
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+ if (tpriv->packet->exp_hash && !skb->hash)
+ goto out;
+ if (tpriv->packet->id != shdr->id)
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __stmmac_test_loopback(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_loopback_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = attr;
+ dev_add_pack(&tpriv->pt);
+
+ skb = stmmac_test_get_udp_skb(priv, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto cleanup;
+
+ if (attr->dont_wait)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = STMMAC_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tpriv->comp, attr->timeout);
+ ret = !tpriv->ok;
+
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dev->phydev)
+ return -EBUSY;
+
+ ret = phy_loopback(priv->dev->phydev, true);
+ if (ret)
+ return ret;
+
+ attr.dst = priv->dev->dev_addr;
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ phy_loopback(priv->dev->phydev, false);
+ return ret;
+}
+
+static int stmmac_test_mmc(struct stmmac_priv *priv)
+{
+ struct stmmac_counters initial, final;
+ int ret;
+
+ memset(&initial, 0, sizeof(initial));
+ memset(&final, 0, sizeof(final));
+
+ if (!priv->dma_cap.rmon)
+ return -EOPNOTSUPP;
+
+ /* Save previous results into internal struct */
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ return ret;
+
+ /* These will be loopback results so no need to save them */
+ stmmac_mmc_read(priv, priv->mmcaddr, &final);
+
+ /*
+ * The number of MMC counters available depends on HW configuration
+ * so we just use this one to validate the feature. I hope there is
+ * not a version without this counter.
+ */
+ if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stmmac_test_eee(struct stmmac_priv *priv)
+{
+ struct stmmac_extra_stats *initial, *final;
+ int retries = 10;
+ int ret;
+
+ if (!priv->dma_cap.eee || !priv->eee_active)
+ return -EOPNOTSUPP;
+
+ initial = kzalloc(sizeof(*initial), GFP_KERNEL);
+ if (!initial)
+ return -ENOMEM;
+
+ final = kzalloc(sizeof(*final), GFP_KERNEL);
+ if (!final) {
+ ret = -ENOMEM;
+ goto out_free_initial;
+ }
+
+ memcpy(initial, &priv->xstats, sizeof(*initial));
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ goto out_free_final;
+
+ /* We have no traffic on the line, so sooner or later it will go LPI */
+ while (--retries) {
+ memcpy(final, &priv->xstats, sizeof(*final));
+
+ if (final->irq_tx_path_in_lpi_mode_n >
+ initial->irq_tx_path_in_lpi_mode_n)
+ break;
+ msleep(100);
+ }
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_in_lpi_mode_n <=
+ initial->irq_tx_path_in_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_exit_lpi_mode_n <=
+ initial->irq_tx_path_exit_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+out_free_final:
+ kfree(final);
+out_free_initial:
+ kfree(initial);
+ return ret;
+}
+
+static int stmmac_filter_check(struct stmmac_priv *priv)
+{
+ if (!(priv->dev->flags & IFF_PROMISC))
+ return 0;
+
+ netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
+ return -EOPNOTSUPP;
+}
+
+static int stmmac_test_hfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
+ unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ ret = stmmac_filter_check(priv);
+ if (ret)
+ return ret;
+
+ ret = dev_mc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_mc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_test_pfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ ret = dev_uc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_uc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
+{
+ return 0;
+}
+
+static void stmmac_test_set_rx_mode(struct net_device *netdev)
+{
+ /* As we are in the ethtool test path we already hold the rtnl lock,
+ * so no address can change from userspace. We can just call the
+ * ndo_set_rx_mode() callback directly.
+ */
+ if (netdev->netdev_ops->ndo_set_rx_mode)
+ netdev->netdev_ops->ndo_set_rx_mode(netdev);
+}
+
+static int stmmac_test_mcfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ /* Remove all MC addresses */
+ __dev_mc_unsync(priv->dev, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+
+ ret = dev_uc_add(priv->dev, uc_addr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = uc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = mc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_uc_del(priv->dev, uc_addr);
+ __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+ return ret;
+}
+
+static int stmmac_test_ucfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ /* Remove all UC addresses */
+ __dev_uc_unsync(priv->dev, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+
+ ret = dev_mc_add(priv->dev, mc_addr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = mc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = uc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_mc_del(priv->dev, mc_addr);
+ __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+ return ret;
+}
+
+static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct ethhdr *ehdr;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ goto out;
+ if (ehdr->h_proto != htons(ETH_P_PAUSE))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_flowctrl(struct stmmac_priv *priv)
+{
+ unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
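+	/* 01:80:C2:00:00:01 is the reserved MAC Control address that
+	 * 802.3 PAUSE frames (EtherType 0x8808 / ETH_P_PAUSE) are
+	 * addressed to.
+	 */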
+ struct phy_device *phydev = priv->dev->phydev;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_test_priv *tpriv;
+ unsigned int pkt_count;
+ int i, ret = 0;
+
+ if (!phydev || !phydev->pause)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+ tpriv->pt.type = htons(ETH_P_PAUSE);
+ tpriv->pt.func = stmmac_test_flowctrl_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ dev_add_pack(&tpriv->pt);
+
+ /* Compute minimum number of packets to make FIFO full */
+ pkt_count = priv->plat->rx_fifo_size;
+ if (!pkt_count)
+ pkt_count = priv->dma_cap.rx_fifo_size;
+ pkt_count /= 1400;
+ pkt_count *= 2;
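+	/* Worked example with hypothetical numbers: a 4096 byte RX FIFO
+	 * gives 4096 / 1400 = 2, doubled to 4 frames, more than the
+	 * stopped RX queues can absorb, so the MAC should emit a PAUSE
+	 * frame.
+	 */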
+
+ for (i = 0; i < rx_cnt; i++)
+ stmmac_stop_rx(priv, priv->ioaddr, i);
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+ if (ret)
+ goto cleanup;
+
+ ret = dev_mc_add(priv->dev, paddr);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < pkt_count; i++) {
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ attr.dont_wait = true;
+ attr.size = 1400;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+ if (tpriv->ok)
+ break;
+ }
+
+ /* Wait for some time in case RX Watchdog is enabled */
+ msleep(200);
+
+ for (i = 0; i < rx_cnt; i++) {
+ struct stmmac_channel *ch = &priv->channel[i];
+
+ stmmac_start_rx(priv, priv->ioaddr, i);
+ local_bh_disable();
+ napi_reschedule(&ch->rx_napi);
+ local_bh_enable();
+ }
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+
+cleanup:
+ dev_mc_del(priv->dev, paddr);
+ dev_set_promiscuity(priv->dev, -1);
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#define STMMAC_LOOPBACK_NONE 0
+#define STMMAC_LOOPBACK_MAC 1
+#define STMMAC_LOOPBACK_PHY 2
+
+static const struct stmmac_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct stmmac_priv *priv);
+} stmmac_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = STMMAC_LOOPBACK_MAC,
+ .fn = stmmac_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
+ .fn = stmmac_test_phy_loopback,
+ }, {
+ .name = "MMC Counters ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mmc,
+ }, {
+ .name = "EEE ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_eee,
+ }, {
+ .name = "Hash Filter MC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_hfilt,
+ }, {
+ .name = "Perfect Filter UC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_pfilt,
+ }, {
+ .name = "MC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mcfilt,
+ }, {
+ .name = "UC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_ucfilt,
+ }, {
+ .name = "Flow Control ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_flowctrl,
+ },
+};
+
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int count = stmmac_selftest_get_count(priv);
+ int carrier = netif_carrier_ok(dev);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ stmmac_test_next_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(priv->dev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!carrier) {
+		netdev_err(priv->dev, "You need a valid link to execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ /* We don't want extra traffic */
+ netif_carrier_off(dev);
+
+	/* Wait for queues to drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case STMMAC_LOOPBACK_MAC:
+ ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
+ break;
+ case STMMAC_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+		/*
+		 * The first tests will always be MAC / PHY loopback. If either
+		 * of them is not supported we abort earlier.
+		 */
+ if (ret) {
+ netdev_err(priv->dev, "Loopback is not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = stmmac_selftests[i].fn(priv);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case STMMAC_LOOPBACK_MAC:
+ stmmac_set_mac_loopback(priv, priv->ioaddr, false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Restart everything */
+ if (carrier)
+ netif_carrier_on(dev);
+}
+
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
+ stmmac_selftests[i].name);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return ARRAY_SIZE(stmmac_selftests);
+}
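Usage note: the table above is exercised through ethtool's offline self-test
interface ("ethtool -t eth0 offline"). A minimal userspace sketch of the
underlying ETHTOOL_TEST ioctl, assuming the interface name "eth0" and a
hardcoded result count (normally queried via ETHTOOL_GDRVINFO); error
handling trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, count = 9;	/* assumed: ARRAY_SIZE(stmmac_selftests) */
	struct ethtool_test *test;
	struct ifreq ifr;

	test = calloc(1, sizeof(*test) + count * sizeof(test->data[0]));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* the driver rejects anything else */
	test->len = count;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < count; i++)
			printf("%2d: %lld\n", i + 1, (long long)test->data[i]);

	free(test);
	close(fd);
	return 0;
}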
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 634fc484a0b3..6d3f1f3f90cb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1423,8 +1423,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
return 0;
err_cleanup:
- cpdma_ctlr_stop(cpsw->dma);
- for_each_slave(priv, cpsw_slave_stop, cpsw);
+ if (!cpsw->usage_count) {
+ cpdma_ctlr_stop(cpsw->dma);
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ }
+
pm_runtime_put_sync(cpsw->dev);
netif_carrier_off(priv->ndev);
return ret;
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index c0ecc6c7b5e0..cdfe7809e3c1 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1509,7 +1509,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr)
rcu_read_lock();
in_dev = __in_dev_get_rcu(vptr->netdev);
if (in_dev != NULL) {
- ifa = (struct in_ifaddr *) in_dev->ifa_list;
+ ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
res = 0;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index af96e05c5bcd..8d994cebb6b0 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,8 +26,8 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE
- select PHYLIB
+ depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
+ select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs.
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 1aeda084b8f1..276292bca334 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -361,7 +361,7 @@ struct temac_local {
/* For synchronization of indirect register access. Must be
* shared mutex between interfaces in same TEMAC block.
*/
- struct mutex *indirect_mutex;
+ spinlock_t *indirect_lock;
u32 options; /* Current options word */
int last_link;
unsigned int temac_features;
@@ -388,8 +388,9 @@ struct temac_local {
/* xilinx_temac.c */
int temac_indirect_busywait(struct temac_local *lp);
u32 temac_indirect_in32(struct temac_local *lp, int reg);
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg);
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
-
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value);
/* xilinx_temac_mdio.c */
int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 14870d659f7d..21c1b4322ea7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -22,7 +22,6 @@
*
* TODO:
* - Factor out locallink DMA code into separate driver
- * - Fix multicast assignment.
* - Fix support for hardware checksumming.
* - Testing. Lots and lots of testing.
*
@@ -53,6 +52,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -84,51 +84,118 @@ static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
return iowrite32(value, lp->regs + offset);
}
+static bool hard_acs_rdy(struct temac_local *lp)
+{
+ return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
+}
+
+static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
+{
+ ktime_t cur = ktime_get();
+
+ return hard_acs_rdy(lp) || ktime_after(cur, timeout);
+}
+
+/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
+ * that was used before, and should cover MDIO bus speed down to 3200
+ * Hz (a full Clause 22 MDIO frame is 64 bit times, and 64 bits at
+ * 3200 Hz take exactly 20 ms).
+ */
+#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
+
+/**
+ * temac_indirect_busywait - Wait for current indirect register access
+ * to complete.
+ */
int temac_indirect_busywait(struct temac_local *lp)
{
- unsigned long end = jiffies + 2;
+ ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
- while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (time_before_eq(end, jiffies)) {
- WARN_ON(1);
- return -ETIMEDOUT;
- }
- usleep_range(500, 1000);
- }
- return 0;
+ spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
+ if (WARN_ON(!hard_acs_rdy(lp)))
+ return -ETIMEDOUT;
+ else
+ return 0;
}
/**
- * temac_indirect_in32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_in32 - Indirect register read access. This function
+ * must be called without lp->indirect_lock being held.
*/
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
- u32 val;
+ unsigned long flags;
+ int val;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+ return val;
+}
- if (temac_indirect_busywait(lp))
+/**
+ * temac_indirect_in32_locked - Indirect register read access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
+{
+ /* This initial wait should normally not spin, as we always
+ * try to wait for indirect access to complete before
+ * releasing the indirect_lock.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
+ /* Initiate read from indirect register */
temac_iow(lp, XTE_CTL0_OFFSET, reg);
- if (temac_indirect_busywait(lp))
+ /* Wait for indirect register access to complete. We really
+ * should not see timeouts, and could even end up causing
+	 * problems for any following indirect access, so let's make a bit
+ * of WARN noise.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
- val = temac_ior(lp, XTE_LSW0_OFFSET);
-
- return val;
+ /* Value is ready now */
+ return temac_ior(lp, XTE_LSW0_OFFSET);
}
/**
- * temac_indirect_out32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_out32 - Indirect register write access. This function
+ * must be called without lp->indirect_lock being held.
*/
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
- if (temac_indirect_busywait(lp))
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, reg, value);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+}
+
+/**
+ * temac_indirect_out32_locked - Indirect register write access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
+{
+ /* As in temac_indirect_in32_locked(), we should normally not
+ * spin here. And if it happens, we actually end up silently
+ * ignoring the write request. Ouch.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return;
+ /* Initiate write to indirect register */
temac_iow(lp, XTE_LSW0_OFFSET, value);
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
- temac_indirect_busywait(lp);
+ /* As in temac_indirect_in32_locked(), we should not see timeouts
+ * here. And if it happens, we continue before the write has
+ * completed. Not good.
+ */
+ WARN_ON(temac_indirect_busywait(lp));
}
/**
@@ -344,20 +411,21 @@ out:
static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ unsigned long flags;
/* set up unicast MAC address filter set its mac address */
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_UAW0_OFFSET,
- (ndev->dev_addr[0]) |
- (ndev->dev_addr[1] << 8) |
- (ndev->dev_addr[2] << 16) |
- (ndev->dev_addr[3] << 24));
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
 	/* There are reserved bits in EUAW1 so don't affect them.
 	 * Set MAC bits [47:32] in EUAW1. */
- temac_indirect_out32(lp, XTE_UAW1_OFFSET,
- (ndev->dev_addr[4] & 0x000000ff) |
- (ndev->dev_addr[5] << 8));
- mutex_unlock(lp->indirect_mutex);
+ temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
+ (ndev->dev_addr[4] & 0x000000ff) |
+ (ndev->dev_addr[5] << 8));
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
@@ -383,49 +451,58 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- u32 multi_addr_msw, multi_addr_lsw, val;
- int i;
+ u32 multi_addr_msw, multi_addr_lsw;
+ int i = 0;
+ unsigned long flags;
+ bool promisc_mode_disabled = false;
- mutex_lock(lp->indirect_mutex);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
- /*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. If it was a promisc request the
- * flag is already set. If not we assert it.
- */
- ndev->flags |= IFF_PROMISC;
+ if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (!netdev_mc_empty(ndev)) {
+ return;
+ }
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+
+ if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (i >= MULTICAST_CAM_TABLE_NUM)
+ if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
break;
multi_addr_msw = ((ha->addr[3] << 24) |
(ha->addr[2] << 16) |
(ha->addr[1] << 8) |
(ha->addr[0]));
- temac_indirect_out32(lp, XTE_MAW0_OFFSET,
- multi_addr_msw);
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
+ multi_addr_msw);
multi_addr_lsw = ((ha->addr[5] << 8) |
(ha->addr[4]) | (i << 16));
- temac_indirect_out32(lp, XTE_MAW1_OFFSET,
- multi_addr_lsw);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
+ multi_addr_lsw);
i++;
}
- } else {
- val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
- temac_indirect_out32(lp, XTE_AFM_OFFSET,
- val & ~XTE_AFM_EPPRM_MASK);
- temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
- mutex_unlock(lp->indirect_mutex);
+
+ /* Clear all or remaining/unused address table entries */
+ while (i < MULTICAST_CAM_TABLE_NUM) {
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
+ i++;
+ }
+
+ /* Enable address filter block if currently disabled */
+ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
+ & XTE_AFM_EPPRM_MASK) {
+ temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
+ promisc_mode_disabled = true;
+ }
+
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
+ if (promisc_mode_disabled)
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
static struct temac_option {
@@ -516,17 +593,19 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
struct temac_local *lp = netdev_priv(ndev);
struct temac_option *tp = &temac_options[0];
int reg;
+ unsigned long flags;
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
while (tp->opt) {
- reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
- if (options & tp->opt)
+ reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
+ if (options & tp->opt) {
reg |= tp->m_or;
- temac_indirect_out32(lp, tp->reg, reg);
+ temac_indirect_out32_locked(lp, tp->reg, reg);
+ }
tp++;
}
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
lp->options |= options;
- mutex_unlock(lp->indirect_mutex);
return 0;
}
@@ -537,6 +616,7 @@ static void temac_device_reset(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
u32 timeout;
u32 val;
+ unsigned long flags;
/* Perform a software reset */
@@ -545,7 +625,6 @@ static void temac_device_reset(struct net_device *ndev)
dev_dbg(&ndev->dev, "%s()\n", __func__);
- mutex_lock(lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
@@ -571,8 +650,11 @@ static void temac_device_reset(struct net_device *ndev)
}
/* Disable the receiver */
- val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
+ val & ~XTE_RXC1_RXEN_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
@@ -592,12 +674,12 @@ static void temac_device_reset(struct net_device *ndev)
"temac_device_reset descriptor allocation failed\n");
}
- temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
- temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
- temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
-
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
@@ -621,13 +703,14 @@ static void temac_adjust_link(struct net_device *ndev)
struct phy_device *phy = ndev->phydev;
u32 mii_speed;
int link_state;
+ unsigned long flags;
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
- mutex_lock(lp->indirect_mutex);
if (lp->last_link != link_state) {
- mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
@@ -637,11 +720,12 @@ static void temac_adjust_link(struct net_device *ndev)
}
/* Write new speed setting out to TEMAC */
- temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+ temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
lp->last_link = link_state;
phy_print_status(phy);
}
- mutex_unlock(lp->indirect_mutex);
}
#ifdef CONFIG_64BIT
@@ -1011,6 +1095,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
+ .ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
@@ -1076,7 +1161,6 @@ static int temac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
@@ -1103,17 +1187,17 @@ static int temac_probe(struct platform_device *pdev)
/* Setup mutex for synchronization of indirect register access */
if (pdata) {
- if (!pdata->indirect_mutex) {
+ if (!pdata->indirect_lock) {
dev_err(&pdev->dev,
- "indirect_mutex missing in platform_data\n");
+ "indirect_lock missing in platform_data\n");
return -EINVAL;
}
- lp->indirect_mutex = pdata->indirect_mutex;
+ lp->indirect_lock = pdata->indirect_lock;
} else {
- lp->indirect_mutex = devm_kmalloc(&pdev->dev,
- sizeof(*lp->indirect_mutex),
- GFP_KERNEL);
- mutex_init(lp->indirect_mutex);
+		lp->indirect_lock = devm_kmalloc(&pdev->dev,
+						 sizeof(*lp->indirect_lock),
+						 GFP_KERNEL);
+		if (!lp->indirect_lock)
+			return -ENOMEM;
+		spin_lock_init(lp->indirect_lock);
}
/* map device registers */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index a4667326f745..6fd2dea4e60f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -25,14 +25,15 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct temac_local *lp = bus->priv;
u32 rc;
+ unsigned long flags;
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
* in the LSW0 register */
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
- rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
- mutex_unlock(lp->indirect_mutex);
+ rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
@@ -43,6 +44,7 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
struct temac_local *lp = bus->priv;
+ unsigned long flags;
dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
@@ -50,10 +52,10 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
/* First write the desired value into the write data register
* and then write the address into the access initiator register
*/
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
- temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_MGTDR_OFFSET, val);
+ temac_indirect_out32_locked(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
return 0;
}
@@ -87,9 +89,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
/* Enable the MDIO bus by asserting the enable bit and writing
* in the clock config */
- mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
- mutex_unlock(lp->indirect_mutex);
bus = devm_mdiobus_alloc(&pdev->dev);
if (!bus)
@@ -116,10 +116,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
if (rc)
return rc;
- mutex_lock(lp->indirect_mutex);
dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
temac_indirect_in32(lp, XTE_MC_OFFSET));
- mutex_unlock(lp->indirect_mutex);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 011adae32b89..2dacfc85b3ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -83,6 +84,8 @@
#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XAXIDMA_SR_HALT_MASK 0x00000001 /* Indicates DMA channel halted */
+
#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
@@ -356,9 +359,6 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
- * @sw_id_offset: MM2S/S2MM Sw ID
- * @reserved5: Reserved and not used
- * @reserved6: Reserved and not used
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
@@ -373,11 +373,9 @@ struct axidma_bd {
u32 app1; /* TX start << 16 | insert */
u32 app2; /* TX csum seed */
u32 app3;
- u32 app4;
- u32 sw_id_offset;
- u32 reserved5;
- u32 reserved6;
-};
+ u32 app4; /* Last field used by HW */
+ struct sk_buff *skb;
+} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
/**
* struct axienet_local - axienet private per device data
@@ -385,6 +383,7 @@ struct axidma_bd {
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
+ * @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
@@ -422,10 +421,17 @@ struct axienet_local {
/* Connection to PHY device */
struct device_node *phy_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+
+ /* Clock for AXI bus */
+ struct clk *clk;
+
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
/* IO registers, dma functions and IRQs */
+ resource_size_t regs_start;
void __iomem *regs;
void __iomem *dma_regs;
@@ -433,17 +439,19 @@ struct axienet_local {
int tx_irq;
int rx_irq;
+ int eth_irq;
phy_interface_t phy_mode;
u32 options; /* Current options word */
- u32 last_link;
u32 features;
/* Buffer descriptors */
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
u32 tx_bd_ci;
u32 tx_bd_tail;
u32 rx_bd_ci;
@@ -481,7 +489,7 @@ struct axienet_option {
*/
static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
{
- return in_be32(lp->regs + offset);
+ return ioread32(lp->regs + offset);
}
static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
@@ -501,12 +509,13 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
static inline void axienet_iow(struct axienet_local *lp, off_t offset,
u32 value)
{
- out_be32((lp->regs + offset), value);
+ iowrite32(value, lp->regs + offset);
}
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
-int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+int axienet_mdio_enable(struct axienet_local *lp);
+void axienet_mdio_disable(struct axienet_local *lp);
+int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 831967f6eff8..da420c881662 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,6 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -21,6 +22,7 @@
* - Add support for extended VLAN support.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -38,16 +40,18 @@
#include "xilinx_axienet.h"
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
-#define AXIENET_REGS_N 32
+#define AXIENET_REGS_N 40
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
@@ -125,7 +129,7 @@ static struct axienet_option axienet_options[] = {
*/
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
- return in_be32(lp->dma_regs + reg);
+ return ioread32(lp->dma_regs + reg);
}
/**
@@ -140,7 +144,7 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
static inline void axienet_dma_out32(struct axienet_local *lp,
off_t reg, u32 value)
{
- out_be32((lp->dma_regs + reg), value);
+ iowrite32(value, lp->dma_regs + reg);
}
/**
@@ -156,22 +160,21 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (lp->rx_bd_v[i].sw_id_offset));
+ dev_kfree_skb(lp->rx_bd_v[i].skb);
}
if (lp->rx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
if (lp->tx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
}
@@ -201,33 +204,33 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
+ ((i + 1) % lp->tx_bd_num);
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
+ ((i + 1) % lp->rx_bd_num);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
- lp->rx_bd_v[i].sw_id_offset = (u32) skb;
+ lp->rx_bd_v[i].skb = skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
lp->max_frm_size,
@@ -269,7 +272,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -434,17 +437,20 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
* reset process.
+ * Note that even though both TX and RX have their own reset register,
+ * they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+ XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
@@ -470,8 +476,7 @@ static void axienet_device_reset(struct net_device *ndev)
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(lp);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -498,6 +503,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
@@ -514,63 +521,6 @@ static void axienet_device_reset(struct net_device *ndev)
}
/**
- * axienet_adjust_link - Adjust the PHY link speed/duplex.
- * @ndev: Pointer to the net_device structure
- *
- * This function is called to change the speed and duplex setting after
- * auto negotiation is done by the PHY. This is the function that gets
- * registered with the PHY interface through the "of_phy_connect" call.
- */
-static void axienet_adjust_link(struct net_device *ndev)
-{
- u32 emmc_reg;
- u32 link_state;
- u32 setspeed = 1;
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = ndev->phydev;
-
- link_state = phy->speed | (phy->duplex << 1) | phy->link;
- if (lp->last_link != link_state) {
- if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
- if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
- setspeed = 0;
- } else {
- if ((phy->speed == SPEED_1000) &&
- (lp->phy_mode == PHY_INTERFACE_MODE_MII))
- setspeed = 0;
- }
-
- if (setspeed == 1) {
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
-
- switch (phy->speed) {
- case SPEED_1000:
- emmc_reg |= XAE_EMMC_LINKSPD_1000;
- break;
- case SPEED_100:
- emmc_reg |= XAE_EMMC_LINKSPD_100;
- break;
- case SPEED_10:
- emmc_reg |= XAE_EMMC_LINKSPD_10;
- break;
- default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
- "or 1Gbps is not supported\n");
- break;
- }
-
- axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
- lp->last_link = link_state;
- phy_print_status(phy);
- } else {
- netdev_err(ndev,
- "Error setting Axi Ethernet mac speed\n");
- }
- }
-}
-
-/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -595,26 +545,31 @@ static void axienet_start_xmit_done(struct net_device *ndev)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+ if (cur_p->skb)
+ dev_consume_skb_irq(cur_p->skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
+ cur_p->skb = NULL;
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
+ if (++lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
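+	/* The barrier ensures the descriptor cleanup above is visible
+	 * before the queue is woken, pairing with the re-check done
+	 * after netif_stop_queue() in axienet_start_xmit().
+	 */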
+
netif_wake_queue(ndev);
}
@@ -635,7 +590,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
return 0;
@@ -670,9 +625,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (axienet_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -695,8 +660,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -707,13 +672,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
+ cur_p->skb = skb;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
return NETDEV_TX_OK;
}
@@ -742,13 +707,15 @@ static void axienet_recv(struct net_device *ndev)
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
- length = cur_p->app4 & 0x0000FFFF;
dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ skb = cur_p->skb;
+ cur_p->skb = NULL;
+ length = cur_p->app4 & 0x0000FFFF;
+
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
@@ -783,10 +750,10 @@ static void axienet_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32) new_skb;
+ cur_p->skb = new_skb;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
+ lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
@@ -802,7 +769,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -821,7 +788,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -851,7 +818,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -870,7 +837,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -895,6 +862,35 @@ out:
return IRQ_HANDLED;
}
+/**
+ * axienet_eth_irq - Ethernet core Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
+ *
+ * Handle miscellaneous conditions indicated by Ethernet core IRQ.
+ */
+static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned int pending;
+
+ pending = axienet_ior(lp, XAE_IP_OFFSET);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & XAE_INT_RXFIFOOVR_MASK)
+ ndev->stats.rx_missed_errors++;
+
+ if (pending & XAE_INT_RXRJECT_MASK)
+ ndev->stats.rx_frame_errors++;
+
+ axienet_iow(lp, XAE_IS_OFFSET, pending);
+ return IRQ_HANDLED;
+}
+
static void axienet_dma_err_handler(unsigned long data);
/**
@@ -904,67 +900,72 @@ static void axienet_dma_err_handler(unsigned long data);
* Return: 0, on success.
* non-zero error value on failure
*
- * This is the driver open routine. It calls phy_start to start the PHY device.
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
* It also allocates interrupt service routines, enables the interrupt lines
* and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
* descriptors are initialized.
*/
static int axienet_open(struct net_device *ndev)
{
- int ret, mdio_mcreg;
+ int ret;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
- return ret;
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
axienet_device_reset(ndev);
- /* Enable the MDIO */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- ret = axienet_mdio_wait_until_ready(lp);
+ ret = axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0, lp->phy_mode);
-
- if (!phydev)
- dev_err(lp->dev, "of_phy_connect() failed\n");
- else
- phy_start(phydev);
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
}
+ phylink_start(lp->phylink);
+
/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_tx_irq;
/* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_rx_irq;
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_eth_irq;
+ }
return 0;
+err_eth_irq:
+ free_irq(lp->rx_irq, ndev);
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (phydev)
- phy_disconnect(phydev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
@@ -976,34 +977,61 @@ err_tx_irq:
*
* Return: 0, on success.
*
- * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
* The Axi DMA Tx/Rx BDs are released.
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr;
+ u32 cr, sr;
+ int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
+
+	/* Give DMAs a chance to halt gracefully (up to 5 x 20 ms each) */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+
tasklet_kill(&lp->dma_err_tasklet);
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
-
axienet_dma_bd_release(ndev);
return 0;
}
@@ -1151,6 +1179,48 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+}
+
+static void axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
}
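+
+/* Userspace sketch of exercising the two handlers above (interface name
+ * assumed), the ioctl path behind "ethtool -G eth0 rx 512 tx 128"; the
+ * interface must be down or the netif_running() check returns -EBUSY:
+ *
+ *	struct ethtool_ringparam er = { .cmd = ETHTOOL_GRINGPARAM };
+ *	struct ifreq ifr = { .ifr_name = "eth0" };
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	ifr.ifr_data = (void *)&er;
+ *	ioctl(fd, SIOCETHTOOL, &ifr);	(read current sizes and limits)
+ *	er.cmd = ETHTOOL_SRINGPARAM;
+ *	er.rx_pending = 512;
+ *	er.tx_pending = 128;
+ *	ioctl(fd, SIOCETHTOOL, &ifr);	(apply the new sizes)
+ */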
/**
@@ -1166,12 +1236,9 @@ static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
- epauseparm->autoneg = 0;
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
- epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+
+ phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1190,27 +1257,9 @@ static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
- }
-
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- if (epauseparm->tx_pause)
- regval |= XAE_FCC_FCTX_MASK;
- else
- regval &= ~XAE_FCC_FCTX_MASK;
- if (epauseparm->rx_pause)
- regval |= XAE_FCC_FCRX_MASK;
- else
- regval &= ~XAE_FCC_FCRX_MASK;
- axienet_iow(lp, XAE_FCC_OFFSET, regval);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1289,17 +1338,170 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
+static int
+axienet_ethtools_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(lp->phylink, cmd);
+}
+
+static int
+axienet_ethtools_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ringparam = axienet_ethtools_get_ringparam,
+ .set_ringparam = axienet_ethtools_set_ringparam,
.get_pauseparam = axienet_ethtools_get_pauseparam,
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = axienet_ethtools_get_link_ksettings,
+ .set_link_ksettings = axienet_ethtools_set_link_ksettings,
+};
+
+static void axienet_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Only support the mode we are configured for */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, Pause);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int axienet_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ state->interface = lp->phy_mode;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ if (emmc_reg & XAE_EMMC_LINKSPD_1000)
+ state->speed = SPEED_1000;
+ else if (emmc_reg & XAE_EMMC_LINKSPD_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+
+ state->pause = 0;
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (fcc_reg & XAE_FCC_FCTX_MASK)
+ state->pause |= MLO_PAUSE_TX;
+ if (fcc_reg & XAE_FCC_FCRX_MASK)
+ state->pause |= MLO_PAUSE_RX;
+
+ state->an_complete = 0;
+ state->duplex = 1;
+
+ return 1;
+}
+
+static void axienet_mac_an_restart(struct phylink_config *config)
+{
+ /* Unsupported, do nothing */
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 or 1Gbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (state->pause & MLO_PAUSE_TX)
+ fcc_reg |= XAE_FCC_FCTX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCTX_MASK;
+ if (state->pause & MLO_PAUSE_RX)
+ fcc_reg |= XAE_FCC_FCRX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
+{
+ /* nothing meaningful to do */
+}
+
+static const struct phylink_mac_ops axienet_phylink_ops = {
+ .validate = axienet_validate,
+ .mac_link_state = axienet_mac_link_state,
+ .mac_an_restart = axienet_mac_an_restart,
+ .mac_config = axienet_mac_config,
+ .mac_link_down = axienet_mac_link_down,
+ .mac_link_up = axienet_mac_link_up,
};
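
A MAC converted to phylink typically drives these ops from its
ndo_open/ndo_stop paths. A hedged sketch of that wiring, assuming the
lp->phylink handle created later in this patch and the stock phylink API
of this kernel generation (example_open/example_stop are hypothetical
names; error handling trimmed):

    static int example_open(struct net_device *ndev)
    {
            struct axienet_local *lp = netdev_priv(ndev);
            int ret;

            /* Attach the DT-described PHY and start the resolver;
             * axienet_mac_config() above is then called back with the
             * negotiated state.
             */
            ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
            if (ret)
                    return ret;
            phylink_start(lp->phylink);
            return 0;
    }

    static int example_stop(struct net_device *ndev)
    {
            struct axienet_local *lp = netdev_priv(ndev);

            phylink_stop(lp->phylink);
            phylink_disconnect_phy(lp->phylink);
            return 0;
    }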
/**
@@ -1313,38 +1515,33 @@ static void axienet_dma_err_handler(unsigned long data)
{
u32 axienet_status;
u32 cr, i;
- int mdio_mcreg;
struct axienet_local *lp = (struct axienet_local *) data;
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- axienet_mdio_wait_until_ready(lp);
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
- ~XAE_MDIO_MC_MDIOEN_MASK));
-
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
-
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- axienet_mdio_wait_until_ready(lp);
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->phys)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+ if (cur_p->skb)
+ dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
@@ -1353,10 +1550,10 @@ static void axienet_dma_err_handler(unsigned long data)
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
- cur_p->sw_id_offset = 0;
+ cur_p->skb = NULL;
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1404,7 +1601,7 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -1422,6 +1619,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
@@ -1453,7 +1652,7 @@ static int axienet_probe(struct platform_device *pdev)
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1476,8 +1675,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs_start = ethres->start;
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
@@ -1568,38 +1770,57 @@ static int axienet_probe(struct platform_device *pdev)
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- goto free_netdev;
+ lp->eth_irq = platform_get_irq(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get DMA memory resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq(pdev, 2);
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
of_node_put(np);
goto free_netdev;
}
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto free_netdev;
}
+ /* Check for Ethernet core IRQ (optional) */
+ if (lp->eth_irq <= 0)
+ dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
if (IS_ERR(mac_addr)) {
- dev_err(&pdev->dev, "could not find MAC address\n");
- goto free_netdev;
+ dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
+ PTR_ERR(mac_addr));
+ mac_addr = NULL;
}
axienet_set_mac_address(ndev, mac_addr);
@@ -1608,9 +1829,36 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
- ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ lp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+ PTR_ERR(lp->clk));
+ lp->clk = NULL;
+ } else {
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+ ret);
+ goto free_netdev;
+ }
+ }
+
+ ret = axienet_mdio_setup(lp);
if (ret)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+ }
+
+ lp->phylink_config.dev = &ndev->dev;
+ lp->phylink_config.type = PHYLINK_NETDEV;
+
+ lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
+ lp->phy_mode,
+ &axienet_phylink_ops);
+ if (IS_ERR(lp->phylink)) {
+ ret = PTR_ERR(lp->phylink);
+ dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+ goto free_netdev;
}
ret = register_netdev(lp->ndev);
@@ -1632,9 +1880,16 @@ static int axienet_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
- axienet_mdio_teardown(lp);
unregister_netdev(ndev);
+ if (lp->phylink)
+ phylink_destroy(lp->phylink);
+
+ axienet_mdio_teardown(lp);
+
+ if (lp->clk)
+ clk_disable_unprepare(lp->clk);
+
of_node_put(lp->phy_node);
lp->phy_node = NULL;
@@ -1643,9 +1898,23 @@ static int axienet_remove(struct platform_device *pdev)
return 0;
}
+static void axienet_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev))
+ dev_close(ndev);
+
+ rtnl_unlock();
+}
+
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
.remove = axienet_remove,
+ .shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 704babdbc8a2..435ed308d990 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -5,9 +5,11 @@
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/jiffies.h>
@@ -16,10 +18,10 @@
#include "xilinx_axienet.h"
#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
-#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
+#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
/* Wait till MDIO interface is ready to accept a new transaction.*/
-int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
@@ -112,23 +114,42 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
}
/**
- * axienet_mdio_setup - MDIO setup function
+ * axienet_mdio_enable - MDIO hardware setup function
* @lp: Pointer to axienet local data structure.
- * @np: Pointer to device node
*
- * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
- * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ * Return: 0 on success, -ETIMEDOUT on a timeout.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * MDIO interface in hardware.
**/
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
+int axienet_mdio_enable(struct axienet_local *lp)
{
- int ret;
u32 clk_div, host_clock;
- struct mii_bus *bus;
- struct resource res;
- struct device_node *np1;
+
+ if (lp->clk) {
+ host_clock = clk_get_rate(lp->clk);
+ } else {
+ struct device_node *np1;
+
+ /* Legacy fallback: detect CPU clock frequency and use as AXI
+ * bus clock frequency. This only works on certain platforms.
+ */
+ np1 = of_find_node_by_name(NULL, "cpu");
+ if (!np1) {
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ } else {
+ int ret = of_property_read_u32(np1, "clock-frequency",
+ &host_clock);
+ if (ret) {
+ netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ }
+ of_node_put(np1);
+ }
+ netdev_info(lp->ndev, "Setting assumed host clock to %u\n",
+ host_clock);
+ }
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
@@ -155,25 +176,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
* "clock-frequency" from the CPU
*/
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- netdev_warn(lp->ndev, "Could not find CPU device node.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- goto issue;
- }
- if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
- netdev_warn(lp->ndev, "clock-frequency property not found.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- of_node_put(np1);
- goto issue;
- }
-
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
@@ -186,12 +188,39 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
clk_div, host_clock);
- of_node_put(np1);
-issue:
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
- ret = axienet_mdio_wait_until_ready(lp);
+ return axienet_mdio_wait_until_ready(lp);
+}
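
Working the divisor equation with concrete clocks shows why the
remainder handling described in the comment above adds one: truncating
division alone could leave fMDIO above the 2.5 MHz ceiling. A standalone
re-derivation (hypothetical helper in plain userspace C; only
MAX_MDIO_FREQ is taken from the driver):

    #include <stdio.h>

    #define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */

    /* fMDIO = fHOST / ((1 + clk_div) * 2), solved for clk_div and
     * bumped by one when the division is inexact so fMDIO rounds down.
     */
    static unsigned int mdio_clk_div(unsigned int host_clock)
    {
            unsigned int clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

            if (host_clock % (MAX_MDIO_FREQ * 2))
                    clk_div++;
            return clk_div;
    }

    int main(void)
    {
            printf("%u\n", mdio_clk_div(150000000)); /* 29: 150e6/60 = 2.5 MHz  */
            printf("%u\n", mdio_clk_div(125000000)); /* 24: 125e6/50 = 2.5 MHz  */
            printf("%u\n", mdio_clk_div(66000000));  /* 13: 66e6/28 ~= 2.36 MHz */
            return 0;
    }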
+
+/**
+ * axienet_mdio_disable - MDIO hardware disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO interface in hardware.
+ **/
+void axienet_mdio_disable(struct axienet_local *lp)
+{
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, 0);
+}
+
+/**
+ * axienet_mdio_setup - MDIO setup function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ *
+ * Sets up the MDIO interface by initializing the MDIO clock and enabling the
+ * MDIO interface in hardware. Register the MDIO interface.
+ **/
+int axienet_mdio_setup(struct axienet_local *lp)
+{
+ struct device_node *mdio_node;
+ struct mii_bus *bus;
+ int ret;
+
+ ret = axienet_mdio_enable(lp);
if (ret < 0)
return ret;
@@ -199,10 +228,8 @@ issue:
if (!bus)
return -ENOMEM;
- np1 = of_get_parent(lp->phy_node);
- of_address_to_resource(np1, 0, &res);
- snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long) res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "axienet-%.8llx",
+ (unsigned long long)lp->regs_start);
bus->priv = lp;
bus->name = "Xilinx Axi Ethernet MDIO";
@@ -211,7 +238,9 @@ issue:
bus->parent = lp->dev;
lp->mii_bus = bus;
- ret = of_mdiobus_register(bus, np1);
+ mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio");
+ ret = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (ret) {
mdiobus_free(bus);
lp->mii_bus = NULL;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 681a882c32cd..940192c057b6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -827,7 +827,7 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ifreq ifrr;
int err = -EOPNOTSUPP;
- strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ strscpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b509b941d5ca..c5c417a3c0ce 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -38,6 +38,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+ debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_status);
return 0;
}
@@ -220,8 +222,49 @@ static int nsim_dev_reload(struct devlink *devlink,
return 0;
}
+#define NSIM_DEV_FLASH_SIZE 500000
+#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
+#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
+
+static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ int i;
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash",
+ component, 0, 0);
+ }
+
+ for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
+ if (nsim_dev->fw_update_status)
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component,
+ i * NSIM_DEV_FLASH_CHUNK_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
+ }
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component,
+ NSIM_DEV_FLASH_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, 0, 0);
+ devlink_flash_update_end_notify(devlink);
+ }
+
+ return 0;
+}
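
With the constants above, the simulated flash walks 500 chunks of 1000
bytes and sleeps 10 ms per chunk, so a 'devlink dev flash' against
netdevsim takes roughly five seconds while emitting one progress
notification per chunk. Back-of-envelope check:

    unsigned int chunks  = NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE;
    unsigned int time_ms = chunks * NSIM_DEV_FLASH_CHUNK_TIME_MS;
    /* chunks = 500, time_ms = 5000 -> ~5 s end to end */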
+
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload = nsim_dev_reload,
+ .flash_update = nsim_dev_flash_update,
};
static struct nsim_dev *
@@ -240,6 +283,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
nsim_dev->fib_data = nsim_fib_create();
if (IS_ERR(nsim_dev->fib_data)) {
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 3f398797c2bc..79c05af2a7c0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -157,6 +157,7 @@ struct nsim_dev {
struct netdev_phys_item_id switch_id;
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
+ bool fw_update_status;
};
int nsim_dev_init(void);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f99f27800fdb..db5645b0c898 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -416,6 +416,12 @@ config NATIONAL_PHY
---help---
Currently supports the DP83865 PHY.
+config NXP_TJA11XX_PHY
+ tristate "NXP TJA11xx PHYs support"
+ depends on HWMON
+ ---help---
+ Currently supports the NXP TJA1100 and TJA1101 PHYs.
+
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 27d7f9f3b0de..bac339e09042 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 0fedd28fdb6e..3b29d381116f 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -27,6 +27,7 @@
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
@@ -360,6 +361,9 @@ static int aqr107_read_status(struct phy_device *phydev)
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
+ phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
@@ -488,9 +492,13 @@ static int aqr107_config_init(struct phy_device *phydev)
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR)
return -ENODEV;
+ WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
+ "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
+
ret = aqr107_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f0c0eefe2202..f6dce6850850 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -81,22 +81,18 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
}
#endif /* CONFIG_OF_MDIO */
-static int bcm87xx_config_init(struct phy_device *phydev)
+static int bcm87xx_get_features(struct phy_device *phydev)
{
- linkmode_zero(phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
phydev->supported);
- linkmode_zero(phydev->advertising);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
- phydev->advertising);
- phydev->state = PHY_NOLINK;
- phydev->autoneg = AUTONEG_DISABLE;
-
- bcm87xx_of_reg_init(phydev);
-
return 0;
}
+static int bcm87xx_config_init(struct phy_device *phydev)
+{
+ return bcm87xx_of_reg_init(phydev);
+}
+
static int bcm87xx_config_aneg(struct phy_device *phydev)
{
return -EINVAL;
@@ -194,7 +190,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
- .features = PHY_10GBIT_FEC_FEATURES,
+ .get_features = bcm87xx_get_features,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
- .features = PHY_10GBIT_FEC_FEATURES,
+ .get_features = bcm87xx_get_features,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c71c7d0f53f0..1f1ecee0ee2f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -34,6 +34,7 @@
#define DP83867_RGMIICTL 0x0032
#define DP83867_STRAP_STS1 0x006E
+#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_10M_SGMII_CFG 0x016F
@@ -63,19 +64,30 @@
/* STRAP_STS1 bits */
#define DP83867_STRAP_STS1_RESERVED BIT(11)
+/* STRAP_STS2 bits */
+#define DP83867_STRAP_STS2_CLK_SKEW_TX_MASK GENMASK(6, 4)
+#define DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT 4
+#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
+#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
+#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
+
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
-#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
+#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
+#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
/* IO_MUX_CFG bits */
-#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
-
+#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83867_IO_MUX_CFG_CLK_O_DISABLE BIT(6)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
@@ -89,13 +101,14 @@ enum {
};
struct dp83867_private {
- int rx_id_delay;
- int tx_id_delay;
- int fifo_depth;
+ u32 rx_id_delay;
+ u32 tx_id_delay;
+ u32 fifo_depth;
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
- int clk_output_sel;
+ bool set_clk_output;
+ u32 clk_output_sel;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -157,38 +170,83 @@ static int dp83867_of_init(struct phy_device *phydev)
if (!of_node)
return -ENODEV;
- dp83867->io_impedance = -EINVAL;
-
/* Optional configuration */
ret = of_property_read_u32(of_node, "ti,clk-output-sel",
&dp83867->clk_output_sel);
- if (ret || dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK)
- /* Keep the default value if ti,clk-output-sel is not set
- * or too high
+ /* If not set, keep default */
+ if (!ret) {
+ dp83867->set_clk_output = true;
+ /* Valid values are 0 to DP83867_CLK_O_SEL_REF_CLK or
+ * DP83867_CLK_O_SEL_OFF.
*/
- dp83867->clk_output_sel = DP83867_CLK_O_SEL_REF_CLK;
+ if (dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK &&
+ dp83867->clk_output_sel != DP83867_CLK_O_SEL_OFF) {
+ phydev_err(phydev, "ti,clk-output-sel value %u out of range\n",
+ dp83867->clk_output_sel);
+ return -EINVAL;
+ }
+ }
if (of_property_read_bool(of_node, "ti,max-output-impedance"))
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+ else
+ dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
"ti,dp83867-rxctrl-strap-quirk");
- ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
- &dp83867->rx_id_delay);
- if (ret &&
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
- return ret;
+ /* Existing behavior was to use default pin strapping delay in rgmii
+ * mode, but rgmii should have meant no delay. Warn existing users.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
+ const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
+ DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
+ const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
+ DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
+
+ if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
+ rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
+ phydev_warn(phydev,
+ "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
+ "Should be 'rgmii-id' to use internal delays\n");
+ }
- ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
- &dp83867->tx_id_delay);
- if (ret &&
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
- return ret;
+ /* RX delay *must* be specified if internal delay of RX is used. */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+ &dp83867->rx_id_delay);
+ if (ret) {
+ phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
+ return ret;
+ }
+ if (dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
+ phydev_err(phydev,
+ "ti,rx-internal-delay value of %u out of range\n",
+ dp83867->rx_id_delay);
+ return -EINVAL;
+ }
+ }
+
+ /* TX delay *must* be specified if internal delay of TX is used. */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+ &dp83867->tx_id_delay);
+ if (ret) {
+ phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
+ return ret;
+ }
+ if (dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
+ phydev_err(phydev,
+ "ti,tx-internal-delay value of %u out of range\n",
+ dp83867->tx_id_delay);
+ return -EINVAL;
+ }
+ }
if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_EN;
@@ -196,8 +254,20 @@ static int dp83867_of_init(struct phy_device *phydev)
if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
- return of_property_read_u32(of_node, "ti,fifo-depth",
+ ret = of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
+ if (ret) {
+ phydev_err(phydev,
+ "ti,fifo-depth property is required\n");
+ return ret;
+ }
+ if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev,
+ "ti,fifo-depth value %u out of range\n",
+ dp83867->fifo_depth);
+ return -EINVAL;
+ }
+ return 0;
}
#else
static int dp83867_of_init(struct phy_device *phydev)
@@ -206,25 +276,29 @@ static int dp83867_of_init(struct phy_device *phydev)
}
#endif /* CONFIG_OF_MDIO */
-static int dp83867_config_init(struct phy_device *phydev)
+static int dp83867_probe(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
+
+ dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
+ GFP_KERNEL);
+ if (!dp83867)
+ return -ENOMEM;
+
+ phydev->priv = dp83867;
+
+ return 0;
+}
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
u16 delay;
- if (!phydev->priv) {
- dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
- GFP_KERNEL);
- if (!dp83867)
- return -ENOMEM;
-
- phydev->priv = dp83867;
- ret = dp83867_of_init(phydev);
- if (ret)
- return ret;
- } else {
- dp83867 = (struct dp83867_private *)phydev->priv;
- }
+ ret = dp83867_of_init(phydev);
+ if (ret)
+ return ret;
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
@@ -256,9 +330,16 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* Set up RGMII delays */
+ /* If rgmii mode with no internal delay is selected, we do NOT use
+ * aligned mode as one might expect. Instead we use the PHY's default
+ * based on pin strapping. And the "mode 0" default is to *use*
+ * internal delay with a value of 7 (2.00 ns).
+ *
+ * Set up RGMII delays
+ */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -275,14 +356,14 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
delay);
-
- if (dp83867->io_impedance >= 0)
- phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
- dp83867->io_impedance &
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
}
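
For reference, the 4-bit delay indices written to RGMIIDCTL map linearly
onto 0.25 ns steps per the dp83867 DT binding (an assumption worth
checking against the datasheet for a given silicon revision): index n
gives (n + 1) * 0.25 ns, so the strap-default index 7 mentioned above is
the 2.00 ns the comment quotes. A sketch of the packing behind the
'delay' value written above:

    unsigned int rx_id_delay = 7;  /* 2.00 ns */
    unsigned int tx_id_delay = 7;  /* 2.00 ns */
    unsigned int delay = rx_id_delay |
                         (tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT);
    /* delay == 0x77: RX index in bits 3:0, TX index in bits 7:4 */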
+ /* If specified, set io impedance */
+ if (dp83867->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK,
+ dp83867->io_impedance);
+
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
/* For support SPEED_10 in SGMII mode
* DP83867_10M_SGMII_RATE_ADAPT bit
@@ -321,11 +402,20 @@ static int dp83867_config_init(struct phy_device *phydev)
dp83867_config_port_mirroring(phydev);
/* Clock output selection if muxing property is set */
- if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK)
+ if (dp83867->set_clk_output) {
+ u16 mask = DP83867_IO_MUX_CFG_CLK_O_DISABLE;
+
+ if (dp83867->clk_output_sel == DP83867_CLK_O_SEL_OFF) {
+ val = DP83867_IO_MUX_CFG_CLK_O_DISABLE;
+ } else {
+ mask |= DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;
+ val = dp83867->clk_output_sel <<
+ DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT;
+ }
+
phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
- DP83867_IO_MUX_CFG_CLK_O_SEL_MASK,
- dp83867->clk_output_sel <<
- DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+ mask, val);
+ }
return 0;
}
@@ -350,6 +440,7 @@ static struct phy_driver dp83867_driver[] = {
.name = "TI DP83867",
/* PHY_GBIT_FEATURES */
+ .probe = dp83867_probe,
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 314486288119..356bd6472f49 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -262,6 +262,8 @@ static struct phy_driver lxt97x_driver[] = {
/* PHY_BASIC_FEATURES */
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = 0x00137a10,
.name = "LXT973-A2",
@@ -271,6 +273,8 @@ static struct phy_driver lxt97x_driver[] = {
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = lxt973a2_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = 0x00137a10,
.name = "LXT973",
@@ -279,6 +283,8 @@ static struct phy_driver lxt97x_driver[] = {
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
new file mode 100644
index 000000000000..b705d0bd798b
--- /dev/null
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NXP TJA1100 BroadR-Reach PHY driver
+ *
+ * Copyright (C) 2018 Marek Vasut <marex@denx.de>
+ */
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/hwmon.h>
+#include <linux/bitfield.h>
+
+#define PHY_ID_MASK 0xfffffff0
+#define PHY_ID_TJA1100 0x0180dc40
+#define PHY_ID_TJA1101 0x0180dd00
+
+#define MII_ECTRL 17
+#define MII_ECTRL_LINK_CONTROL BIT(15)
+#define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11)
+#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
+#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
+#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
+#define MII_ECTRL_CONFIG_EN BIT(2)
+#define MII_ECTRL_WAKE_REQUEST BIT(0)
+
+#define MII_CFG1 18
+#define MII_CFG1_AUTO_OP BIT(14)
+#define MII_CFG1_SLEEP_CONFIRM BIT(6)
+#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
+#define MII_CFG1_LED_MODE_LINKUP 0
+#define MII_CFG1_LED_ENABLE BIT(3)
+
+#define MII_CFG2 19
+#define MII_CFG2_SLEEP_REQUEST_TO GENMASK(1, 0)
+#define MII_CFG2_SLEEP_REQUEST_TO_16MS 0x3
+
+#define MII_INTSRC 21
+#define MII_INTSRC_TEMP_ERR BIT(1)
+#define MII_INTSRC_UV_ERR BIT(3)
+
+#define MII_COMMSTAT 23
+#define MII_COMMSTAT_LINK_UP BIT(15)
+
+#define MII_GENSTAT 24
+#define MII_GENSTAT_PLL_LOCKED BIT(14)
+
+#define MII_COMMCFG 27
+#define MII_COMMCFG_AUTO_OP BIT(15)
+
+struct tja11xx_priv {
+ char *hwmon_name;
+ struct device *hwmon_dev;
+};
+
+struct tja11xx_phy_stats {
+ const char *string;
+ u8 reg;
+ u8 off;
+ u16 mask;
+};
+
+static struct tja11xx_phy_stats tja11xx_hw_stats[] = {
+ { "phy_symbol_error_count", 20, 0, GENMASK(15, 0) },
+ { "phy_polarity_detect", 25, 6, BIT(6) },
+ { "phy_open_detect", 25, 7, BIT(7) },
+ { "phy_short_detect", 25, 8, BIT(8) },
+ { "phy_rem_rcvr_count", 26, 0, GENMASK(7, 0) },
+ { "phy_loc_rcvr_count", 26, 8, GENMASK(15, 8) },
+};
+
+static int tja11xx_check(struct phy_device *phydev, u8 reg, u16 mask, u16 set)
+{
+ int i, ret;
+
+ for (i = 0; i < 200; i++) {
+ ret = phy_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & mask) == set)
+ return 0;
+
+ usleep_range(100, 150);
+ }
+
+ return -ETIMEDOUT;
+}
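
The poll budget bounds how long callers can block before the helper
gives up:

    /* worst case ~= 200 * 150 us = 30 ms, best case ~= 200 * 100 us = 20 ms
     * (plus MDIO read latency) before -ETIMEDOUT is returned
     */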
+
+static int phy_modify_check(struct phy_device *phydev, u8 reg,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = phy_modify(phydev, reg, mask, set);
+ if (ret)
+ return ret;
+
+ return tja11xx_check(phydev, reg, mask, set);
+}
+
+static int tja11xx_enable_reg_write(struct phy_device *phydev)
+{
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_CONFIG_EN);
+}
+
+static int tja11xx_enable_link_control(struct phy_device *phydev)
+{
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
+}
+
+static int tja11xx_wakeup(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_ECTRL);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & MII_ECTRL_POWER_MODE_MASK) {
+ case MII_ECTRL_POWER_MODE_NO_CHANGE:
+ break;
+ case MII_ECTRL_POWER_MODE_NORMAL:
+ ret = phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_WAKE_REQUEST);
+ if (ret)
+ return ret;
+
+ ret = phy_clear_bits(phydev, MII_ECTRL, MII_ECTRL_WAKE_REQUEST);
+ if (ret)
+ return ret;
+ break;
+ case MII_ECTRL_POWER_MODE_STANDBY:
+ ret = phy_modify_check(phydev, MII_ECTRL,
+ MII_ECTRL_POWER_MODE_MASK,
+ MII_ECTRL_POWER_MODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_ECTRL, MII_ECTRL_POWER_MODE_MASK,
+ MII_ECTRL_POWER_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ ret = phy_modify_check(phydev, MII_GENSTAT,
+ MII_GENSTAT_PLL_LOCKED,
+ MII_GENSTAT_PLL_LOCKED);
+ if (ret)
+ return ret;
+
+ return tja11xx_enable_link_control(phydev);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tja11xx_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = tja11xx_enable_reg_write(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int tja11xx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = tja11xx_enable_reg_write(phydev);
+ if (ret)
+ return ret;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ switch (phydev->phy_id & PHY_ID_MASK) {
+ case PHY_ID_TJA1100:
+ ret = phy_modify(phydev, MII_CFG1,
+ MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
+ MII_CFG1_LED_ENABLE,
+ MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
+ MII_CFG1_LED_ENABLE);
+ if (ret)
+ return ret;
+ break;
+ case PHY_ID_TJA1101:
+ ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_clear_bits(phydev, MII_CFG1, MII_CFG1_SLEEP_CONFIRM);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_CFG2, MII_CFG2_SLEEP_REQUEST_TO,
+ MII_CFG2_SLEEP_REQUEST_TO_16MS);
+ if (ret)
+ return ret;
+
+ ret = tja11xx_wakeup(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* ACK interrupts by reading the status register */
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tja11xx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->link) {
+ ret = phy_read(phydev, MII_COMMSTAT);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_COMMSTAT_LINK_UP))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
+static int tja11xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(tja11xx_hw_stats);
+}
+
+static void tja11xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) {
+ strncpy(data + i * ETH_GSTRING_LEN,
+ tja11xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void tja11xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) {
+ ret = phy_read(phydev, tja11xx_hw_stats[i].reg);
+ if (ret < 0)
+ data[i] = U64_MAX;
+ else {
+ data[i] = ret & tja11xx_hw_stats[i].mask;
+ data[i] >>= tja11xx_hw_stats[i].off;
+ }
+ }
+}
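
Each table row above is a mask-then-shift extraction. Worked through for
a hypothetical readback of register 26 returning 0x2a07:

    /* phy_loc_rcvr_count: (0x2a07 & GENMASK(15, 8)) >> 8 = 0x2a = 42 */
    /* phy_rem_rcvr_count: (0x2a07 & GENMASK(7, 0))  >> 0 = 0x07 =  7 */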
+
+static int tja11xx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ if (type == hwmon_in && attr == hwmon_in_lcrit_alarm) {
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ *value = !!(ret & MII_INTSRC_UV_ERR); /* supply undervoltage */
+ return 0;
+ }
+
+ if (type == hwmon_temp && attr == hwmon_temp_crit_alarm) {
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ *value = !!(ret & MII_INTSRC_TEMP_ERR); /* overtemperature */
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t tja11xx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_in && attr == hwmon_in_lcrit_alarm)
+ return 0444;
+
+ if (type == hwmon_temp && attr == hwmon_temp_crit_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *tja11xx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(in, HWMON_I_LCRIT_ALARM),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tja11xx_hwmon_hwmon_ops = {
+ .is_visible = tja11xx_hwmon_is_visible,
+ .read = tja11xx_hwmon_read,
+};
+
+static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
+ .ops = &tja11xx_hwmon_hwmon_ops,
+ .info = tja11xx_hwmon_info,
+};
+
+static int tja11xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->hwmon_name)
+ return -ENOMEM;
+
+ for (i = 0; priv->hwmon_name[i]; i++)
+ if (hwmon_is_bad_char(priv->hwmon_name[i]))
+ priv->hwmon_name[i] = '_';
+
+ priv->hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
+ phydev,
+ &tja11xx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+}
+
+static struct phy_driver tja11xx_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100),
+ .name = "NXP TJA1100",
+ .features = PHY_BASIC_T1_FEATURES,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }, {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101),
+ .name = "NXP TJA1101",
+ .features = PHY_BASIC_T1_FEATURES,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }
+};
+
+module_phy_driver(tja11xx_driver);
+
+static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, tja11xx_tbl);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("NXP TJA11xx BroadR-Reach PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 3daf0214a242..16667fbac8bf 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 69,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -131,9 +131,11 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
PHY_SETTING( 1000, FULL, 1000baseT_Full ),
PHY_SETTING( 1000, HALF, 1000baseT_Half ),
+ PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
PHY_SETTING( 1000, FULL, 1000baseX_Full ),
/* 100M */
PHY_SETTING( 100, FULL, 100baseT_Full ),
+ PHY_SETTING( 100, FULL, 100baseT1_Full ),
PHY_SETTING( 100, HALF, 100baseT_Half ),
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e8885429293a..d9150765009e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,8 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#define PHY_STATE_TIME HZ
+
#define PHY_STATE_STR(_state) \
case PHY_##_state: \
return __stringify(_state); \
@@ -41,7 +43,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(UP)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
- PHY_STATE_STR(FORCING)
PHY_STATE_STR(HALTED)
}
@@ -407,6 +408,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
bool change_autoneg = false;
+ int prtad, devad;
switch (cmd) {
case SIOCGMIIPHY:
@@ -414,14 +416,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
/* fall through */
case SIOCGMIIREG:
- mii_data->val_out = mdiobus_read(phydev->mdio.bus,
- mii_data->phy_id,
- mii_data->reg_num);
+ if (mdio_phy_id_is_c45(mii_data->phy_id)) {
+ prtad = mdio_phy_id_prtad(mii_data->phy_id);
+ devad = mdio_phy_id_devad(mii_data->phy_id);
+ devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ } else {
+ prtad = mii_data->phy_id;
+ devad = mii_data->reg_num;
+ }
+ mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad,
+ devad);
return 0;
case SIOCSMIIREG:
- if (mii_data->phy_id == phydev->mdio.addr) {
- switch (mii_data->reg_num) {
+ if (mdio_phy_id_is_c45(mii_data->phy_id)) {
+ prtad = mdio_phy_id_prtad(mii_data->phy_id);
+ devad = mdio_phy_id_devad(mii_data->phy_id);
+ devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ } else {
+ prtad = mii_data->phy_id;
+ devad = mii_data->reg_num;
+ }
+ if (prtad == phydev->mdio.addr) {
+ switch (devad) {
case MII_BMCR:
if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
if (phydev->autoneg == AUTONEG_ENABLE)
@@ -454,11 +471,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
}
- mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
- mii_data->reg_num, val);
+ mdiobus_write(phydev->mdio.bus, prtad, devad, val);
- if (mii_data->phy_id == phydev->mdio.addr &&
- mii_data->reg_num == MII_BMCR &&
+ if (prtad == phydev->mdio.addr &&
+ devad == MII_BMCR &&
val & BMCR_RESET)
return phy_init_hw(phydev);
@@ -478,12 +494,12 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
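
On the user-space side, a Clause 45 access is requested by folding the
port and MMD addresses into phy_id with the helpers from the uapi
<linux/mdio.h>; the code above unpacks them and rebuilds the
MII_ADDR_C45 cookie that mdiobus_read()/mdiobus_write() expect. A hedged
sketch of such a caller (the surrounding ioctl plumbing is omitted):

    #include <linux/mdio.h> /* mdio_phy_id_c45(), MDIO_MMD_PMAPMD, MDIO_STAT1 */
    #include <linux/mii.h>  /* struct mii_ioctl_data */

    /* read PMA/PMD status 1 (register 1 of MMD 1) at port address 4 */
    struct mii_ioctl_data data = {
            .phy_id  = mdio_phy_id_c45(4, MDIO_MMD_PMAPMD),
            .reg_num = MDIO_STAT1,
    };
    /* passed via ifr_data with ioctl(fd, SIOCGMIIREG, &ifr); the handler
     * above decodes prtad = 4 and devad = MII_ADDR_C45 | (1 << 16) | 1
     */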
-static void phy_queue_state_machine(struct phy_device *phydev,
- unsigned int secs)
+void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- secs * HZ);
+ jiffies);
}
+EXPORT_SYMBOL(phy_queue_state_machine);
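
Since the delay is now in jiffies rather than whole seconds, callers can
schedule sub-second polls; the PHY_STATE_TIME define added at the top of
this file keeps the old one-second cadence. Equivalent calls under the
new signature (sketch):

    phy_queue_state_machine(phydev, PHY_STATE_TIME);       /* 1 s, as before */
    phy_queue_state_machine(phydev, msecs_to_jiffies(10)); /* 10 ms          */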
static void phy_trigger_machine(struct phy_device *phydev)
{
@@ -560,15 +576,8 @@ int phy_start_aneg(struct phy_device *phydev)
if (err < 0)
goto out_unlock;
- if (phy_is_started(phydev)) {
- if (phydev->autoneg == AUTONEG_ENABLE) {
- err = phy_check_link_status(phydev);
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
- }
- }
-
+ if (phy_is_started(phydev))
+ err = phy_check_link_status(phydev);
out_unlock:
mutex_unlock(&phydev->lock);
@@ -772,8 +781,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
return IRQ_NONE;
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
+ if (phydev->drv->handle_interrupt) {
+ if (phydev->drv->handle_interrupt(phydev))
+ goto phy_err;
+ } else {
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev);
+ }
if (phy_clear_interrupt(phydev))
goto phy_err;
@@ -799,10 +813,10 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_request_interrupt - request interrupt for a PHY device
+ * phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
- * Description: Request the interrupt for the given PHY.
+ * Description: Request and enable the interrupt for the given PHY.
* If this fails, then we set irq to PHY_POLL.
* This should only be called with a valid IRQ number.
*/
@@ -817,11 +831,31 @@ void phy_request_interrupt(struct phy_device *phydev)
phydev_warn(phydev, "Error %d requesting IRQ %d, falling back to polling\n",
err, phydev->irq);
phydev->irq = PHY_POLL;
+ } else {
+ if (phy_enable_interrupts(phydev)) {
+ phydev_warn(phydev, "Can't enable interrupt, falling back to polling\n");
+ phy_free_interrupt(phydev);
+ phydev->irq = PHY_POLL;
+ }
}
}
EXPORT_SYMBOL(phy_request_interrupt);
/**
+ * phy_free_interrupt - disable and free interrupt for a PHY device
+ * @phydev: target phy_device struct
+ *
+ * Description: Disable and free the interrupt for the given PHY.
+ * This should only be called with a valid IRQ number.
+ */
+void phy_free_interrupt(struct phy_device *phydev)
+{
+ phy_disable_interrupts(phydev);
+ free_irq(phydev->irq, phydev);
+}
+EXPORT_SYMBOL(phy_free_interrupt);
+
+/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
@@ -835,9 +869,6 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
- if (phy_interrupt_is_valid(phydev))
- phy_disable_interrupts(phydev);
-
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -864,8 +895,6 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- int err;
-
mutex_lock(&phydev->lock);
if (phydev->state != PHY_READY && phydev->state != PHY_HALTED) {
@@ -877,13 +906,6 @@ void phy_start(struct phy_device *phydev)
/* if phy was suspended, bring the physical link up again */
__phy_resume(phydev);
- /* make sure interrupts are enabled for the PHY */
- if (phy_interrupt_is_valid(phydev)) {
- err = phy_enable_interrupts(phydev);
- if (err < 0)
- goto out;
- }
-
phydev->state = PHY_UP;
phy_start_machine(phydev);
@@ -921,20 +943,6 @@ void phy_state_machine(struct work_struct *work)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
break;
- case PHY_FORCING:
- err = genphy_update_link(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- if (0 == phydev->link_timeout--)
- needs_aneg = true;
- phy_link_down(phydev, false);
- }
- break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index dcc93a873174..03c885ec1f98 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(phy_10_100_features_array);
const int phy_basic_t1_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
@@ -948,6 +948,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
{
int rc;
+ if (!dev)
+ return -EINVAL;
+
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -1013,7 +1016,7 @@ void phy_disconnect(struct phy_device *phydev)
phy_stop(phydev);
if (phy_interrupt_is_valid(phydev))
- free_irq(phydev->irq, phydev);
+ phy_free_interrupt(phydev);
phydev->adjust_link = NULL;
@@ -1133,6 +1136,44 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
}
EXPORT_SYMBOL(phy_attached_print);
+static void phy_sysfs_create_links(struct phy_device *phydev)
+{
+ struct net_device *dev = phydev->attached_dev;
+ int err;
+
+ if (!dev)
+ return;
+
+ err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
+ "attached_dev");
+ if (err)
+ return;
+
+ err = sysfs_create_link_nowarn(&dev->dev.kobj,
+ &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err) {
+ dev_err(&dev->dev, "could not add device link to %s err %d\n",
+ kobject_name(&phydev->mdio.dev.kobj),
+ err);
+ /* non-fatal - some net drivers can use one netdevice
+ * with more than one PHY
+ */
+ }
+
+ phydev->sysfs_links = true;
+}
+
+static ssize_t
+phy_standalone_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", !phydev->attached_dev);
+}
+static DEVICE_ATTR_RO(phy_standalone);
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1151,9 +1192,9 @@ EXPORT_SYMBOL(phy_attached_print);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
- struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
+ struct module *ndev_owner = NULL;
bool using_genphy = false;
int err;
@@ -1162,8 +1203,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* our own module->refcnt here, otherwise we would not be able to
* unload later on.
*/
+ if (dev)
+ ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner && !try_module_get(bus->owner)) {
- dev_err(&dev->dev, "failed to get the bus module\n");
+ phydev_err(phydev, "failed to get the bus module\n");
return -EIO;
}
@@ -1182,7 +1225,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
}
if (!try_module_get(d->driver->owner)) {
- dev_err(&dev->dev, "failed to get the device driver module\n");
+ phydev_err(phydev, "failed to get the device driver module\n");
err = -EIO;
goto error_put_device;
}
@@ -1203,8 +1246,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
}
phydev->phy_link_change = phy_link_change;
- phydev->attached_dev = dev;
- dev->phydev = phydev;
+ if (dev) {
+ phydev->attached_dev = dev;
+ dev->phydev = phydev;
+ }
/* Some Ethernet drivers try to connect to a PHY device before
* calling register_netdevice() -> netdev_register_kobject() and
@@ -1216,22 +1261,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
*/
phydev->sysfs_links = false;
- err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
- "attached_dev");
- if (!err) {
- err = sysfs_create_link_nowarn(&dev->dev.kobj,
- &phydev->mdio.dev.kobj,
- "phydev");
- if (err) {
- dev_err(&dev->dev, "could not add device link to %s err %d\n",
- kobject_name(&phydev->mdio.dev.kobj),
- err);
- /* non-fatal - some net drivers can use one netdevice
- * with more then one phy
- */
- }
+ phy_sysfs_create_links(phydev);
- phydev->sysfs_links = true;
+ if (!phydev->attached_dev) {
+ err = sysfs_create_file(&phydev->mdio.dev.kobj,
+ &dev_attr_phy_standalone.attr);
+ if (err)
+ phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n");
}
phydev->dev_flags = flags;
@@ -1243,7 +1279,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Initial carrier state is off as the phy is about to be
* (re)initialized.
*/
- netif_carrier_off(phydev->attached_dev);
+ if (dev)
+ netif_carrier_off(phydev->attached_dev);
/* Do initial configuration here, now that
* we have certain key parameters
@@ -1290,6 +1327,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
struct device *d;
int rc;
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
@@ -1349,16 +1389,24 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
void phy_detach(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
- struct module *ndev_owner = dev->dev.parent->driver->owner;
+ struct module *ndev_owner = NULL;
struct mii_bus *bus;
if (phydev->sysfs_links) {
- sysfs_remove_link(&dev->dev.kobj, "phydev");
+ if (dev)
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
}
+
+ if (!phydev->attached_dev)
+ sysfs_remove_file(&phydev->mdio.dev.kobj,
+ &dev_attr_phy_standalone.attr);
+
phy_suspend(phydev);
- phydev->attached_dev->phydev = NULL;
- phydev->attached_dev = NULL;
+ if (dev) {
+ phydev->attached_dev->phydev = NULL;
+ phydev->attached_dev = NULL;
+ }
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
@@ -1381,6 +1429,8 @@ void phy_detach(struct phy_device *phydev)
bus = phydev->mdio.bus;
put_device(&phydev->mdio.dev);
+ if (dev)
+ ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -1880,6 +1930,9 @@ int genphy_config_init(struct phy_device *phydev)
if (val & ESTATUS_1000_THALF)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
features);
+ if (val & ESTATUS_1000_XFULL)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ features);
}
linkmode_and(phydev->supported, phydev->supported, features);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 4c0616ba314d..5d0af041b8f9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -41,6 +41,9 @@ struct phylink {
/* private: */
struct net_device *netdev;
const struct phylink_mac_ops *ops;
+ struct phylink_config *config;
+ struct device *dev;
+ unsigned int old_link_state:1;
unsigned long phylink_disable_state; /* bitmask of disables */
struct phy_device *phydev;
@@ -56,6 +59,7 @@ struct phylink {
phy_interface_t cur_interface;
struct gpio_desc *link_gpio;
+ unsigned int link_irq;
struct timer_list link_poll;
void (*get_fixed_state)(struct net_device *dev,
struct phylink_link_state *s);
@@ -69,6 +73,23 @@ struct phylink {
struct sfp_bus *sfp_bus;
};
+#define phylink_printk(level, pl, fmt, ...) \
+ do { \
+ if ((pl)->config->type == PHYLINK_NETDEV) \
+ netdev_printk(level, (pl)->netdev, fmt, ##__VA_ARGS__); \
+ else if ((pl)->config->type == PHYLINK_DEV) \
+ dev_printk(level, (pl)->dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define phylink_err(pl, fmt, ...) \
+ phylink_printk(KERN_ERR, pl, fmt, ##__VA_ARGS__)
+#define phylink_warn(pl, fmt, ...) \
+ phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
+#define phylink_info(pl, fmt, ...) \
+ phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#define phylink_dbg(pl, fmt, ...) \
+ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
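
The wrapper lets one call site serve both kinds of consumer: a
netdev-backed MAC logs through netdev_printk(), while an instance
without a net_device (PHYLINK_DEV, as used for example by ports that
have no netdev) falls back to dev_printk(). For instance:

    phylink_err(pl, "broken fixed-link?\n");
    /* PHYLINK_NETDEV: routed to netdev_err(pl->netdev, ...)
     * PHYLINK_DEV:    routed to dev_err(pl->dev, ...)
     */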
+
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
* @mask: ethtool link mode mask
@@ -115,7 +136,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- pl->ops->validate(pl->netdev, supported, state);
+ pl->ops->validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
@@ -165,7 +186,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
NULL, 0);
if (ret != ARRAY_SIZE(prop)) {
- netdev_err(pl->netdev, "broken fixed-link?\n");
+ phylink_err(pl, "broken fixed-link?\n");
return -EINVAL;
}
@@ -184,8 +205,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
if (pl->link_config.speed > SPEED_1000 &&
pl->link_config.duplex != DUPLEX_FULL)
- netdev_warn(pl->netdev, "fixed link specifies half duplex for %dMbps link?\n",
- pl->link_config.speed);
+ phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
+ pl->link_config.speed);
bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_copy(pl->link_config.advertising, pl->supported);
@@ -198,9 +219,9 @@ static int phylink_parse_fixedlink(struct phylink *pl,
if (s) {
__set_bit(s->bit, pl->supported);
} else {
- netdev_warn(pl->netdev, "fixed link %s duplex %dMbps not recognised\n",
- pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
- pl->link_config.speed);
+ phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
+ pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
+ pl->link_config.speed);
}
linkmode_and(pl->link_config.advertising, pl->link_config.advertising,
@@ -225,8 +246,8 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
if (pl->link_an_mode == MLO_AN_FIXED) {
- netdev_err(pl->netdev,
- "can't use both fixed-link and in-band-status\n");
+ phylink_err(pl,
+ "can't use both fixed-link and in-band-status\n");
return -EINVAL;
}
@@ -273,17 +294,17 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
break;
default:
- netdev_err(pl->netdev,
- "incorrect link mode %s for in-band status\n",
- phy_modes(pl->link_config.interface));
+ phylink_err(pl,
+ "incorrect link mode %s for in-band status\n",
+ phy_modes(pl->link_config.interface));
return -EINVAL;
}
linkmode_copy(pl->link_config.advertising, pl->supported);
if (phylink_validate(pl, pl->supported, &pl->link_config)) {
- netdev_err(pl->netdev,
- "failed to validate link configuration for in-band status\n");
+ phylink_err(pl,
+ "failed to validate link configuration for in-band status\n");
return -EINVAL;
}
}
@@ -294,16 +315,16 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
- netdev_dbg(pl->netdev,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
- __func__, phylink_an_mode_str(pl->link_an_mode),
- phy_modes(state->interface),
- phy_speed_to_str(state->speed),
- phy_duplex_to_str(state->duplex),
- __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
- state->pause, state->link, state->an_enabled);
-
- pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
+ phylink_dbg(pl,
+ "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ __func__, phylink_an_mode_str(pl->link_an_mode),
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
+ state->pause, state->link, state->an_enabled);
+
+ pl->ops->mac_config(pl->config, pl->link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -317,12 +338,11 @@ static void phylink_mac_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
phy_interface_mode_is_8023z(pl->link_config.interface))
- pl->ops->mac_an_restart(pl->netdev);
+ pl->ops->mac_an_restart(pl->config);
}
static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state)
{
- struct net_device *ndev = pl->netdev;
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
@@ -334,7 +354,7 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
state->an_complete = 0;
state->link = 1;
- return pl->ops->mac_link_state(ndev, state);
+ return pl->ops->mac_link_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
@@ -399,11 +419,43 @@ static const char *phylink_pause_to_str(int pause)
}
}
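+/* Common link-up/link-down handling, factored out of phylink_resolve().
+ * The carrier is only touched when a net_device is present.
+ */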
+static void phylink_mac_link_up(struct phylink *pl,
+ struct phylink_link_state link_state)
+{
+ struct net_device *ndev = pl->netdev;
+
+ pl->cur_interface = link_state.interface;
+ pl->ops->mac_link_up(pl->config, pl->link_an_mode,
+ pl->cur_interface, pl->phydev);
+
+ if (ndev)
+ netif_carrier_on(ndev);
+
+ phylink_info(pl,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(link_state.speed),
+ phy_duplex_to_str(link_state.duplex),
+ phylink_pause_to_str(link_state.pause));
+}
+
+static void phylink_mac_link_down(struct phylink *pl)
+{
+ struct net_device *ndev = pl->netdev;
+
+ if (ndev)
+ netif_carrier_off(ndev);
+ pl->ops->mac_link_down(pl->config, pl->link_an_mode,
+ pl->cur_interface);
+ phylink_info(pl, "Link is Down\n");
+}
+
static void phylink_resolve(struct work_struct *w)
{
struct phylink *pl = container_of(w, struct phylink, resolve);
struct phylink_link_state link_state;
struct net_device *ndev = pl->netdev;
+ int link_changed;
mutex_lock(&pl->state_mutex);
if (pl->phylink_disable_state) {
@@ -446,25 +498,17 @@ static void phylink_resolve(struct work_struct *w)
}
}
- if (link_state.link != netif_carrier_ok(ndev)) {
- if (!link_state.link) {
- netif_carrier_off(ndev);
- pl->ops->mac_link_down(ndev, pl->link_an_mode,
- pl->cur_interface);
- netdev_info(ndev, "Link is Down\n");
- } else {
- pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(ndev, pl->link_an_mode,
- pl->cur_interface, pl->phydev);
-
- netif_carrier_on(ndev);
-
- netdev_info(ndev,
- "Link is Up - %s/%s - flow control %s\n",
- phy_speed_to_str(link_state.speed),
- phy_duplex_to_str(link_state.duplex),
- phylink_pause_to_str(link_state.pause));
- }
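+ /* Without a netdev there is no carrier flag, so track the
+ * previous link state in old_link_state instead.
+ */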
+ if (pl->netdev)
+ link_changed = (link_state.link != netif_carrier_ok(ndev));
+ else
+ link_changed = (link_state.link != pl->old_link_state);
+
+ if (link_changed) {
+ pl->old_link_state = link_state.link;
+ if (!link_state.link)
+ phylink_mac_link_down(pl);
+ else
+ phylink_mac_link_up(pl, link_state);
}
if (!link_state.link && pl->mac_link_dropped) {
pl->mac_link_dropped = false;
@@ -516,13 +560,12 @@ static int phylink_register_sfp(struct phylink *pl,
if (ret == -ENOENT)
return 0;
- netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
- ret);
+ phylink_err(pl, "unable to parse \"sfp\" node: %d\n",
+ ret);
return ret;
}
- pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
- &sfp_phylink_ops);
+ pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
if (!pl->sfp_bus)
return -ENOMEM;
@@ -543,7 +586,7 @@ static int phylink_register_sfp(struct phylink *pl,
* Returns a pointer to a &struct phylink, or an error-pointer value. Users
* must use IS_ERR() to check for errors from this function.
*/
-struct phylink *phylink_create(struct net_device *ndev,
+struct phylink *phylink_create(struct phylink_config *config,
struct fwnode_handle *fwnode,
phy_interface_t iface,
const struct phylink_mac_ops *ops)
@@ -557,7 +600,17 @@ struct phylink *phylink_create(struct net_device *ndev,
mutex_init(&pl->state_mutex);
INIT_WORK(&pl->resolve, phylink_resolve);
- pl->netdev = ndev;
+
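+ /* Resolve the config type up front so that later code can rely
+ * on either pl->netdev or pl->dev being valid.
+ */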
+ pl->config = config;
+ if (config->type == PHYLINK_NETDEV) {
+ pl->netdev = to_net_dev(config->dev);
+ } else if (config->type == PHYLINK_DEV) {
+ pl->dev = config->dev;
+ } else {
+ kfree(pl);
+ return ERR_PTR(-EINVAL);
+ }
+
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -612,7 +665,7 @@ void phylink_destroy(struct phylink *pl)
{
if (pl->sfp_bus)
sfp_unregister_upstream(pl->sfp_bus);
- if (!IS_ERR_OR_NULL(pl->link_gpio))
+ if (pl->link_gpio)
gpiod_put(pl->link_gpio);
cancel_work_sync(&pl->resolve);
@@ -639,10 +692,10 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
phylink_run_resolve(pl);
- netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? "up" : "down",
- phy_modes(phydev->interface),
- phy_speed_to_str(phydev->speed),
- phy_duplex_to_str(phydev->duplex));
+ phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? "up" : "down",
+ phy_modes(phydev->interface),
+ phy_speed_to_str(phydev->speed),
+ phy_duplex_to_str(phydev->duplex));
}
static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
@@ -675,9 +728,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
- netdev_info(pl->netdev,
- "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
- phy->drv->name);
+ phylink_info(pl,
+ "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
+ phy->drv->name);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
@@ -690,10 +743,10 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
- netdev_dbg(pl->netdev,
- "phy: setting supported %*pb advertising %*pb\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
+ phylink_dbg(pl,
+ "phy: setting supported %*pb advertising %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
if (phy_interrupt_is_valid(phy))
phy_request_interrupt(phy);
@@ -871,10 +924,19 @@ void phylink_mac_change(struct phylink *pl, bool up)
if (!up)
pl->mac_link_dropped = true;
phylink_run_resolve(pl);
- netdev_dbg(pl->netdev, "mac link %s\n", up ? "up" : "down");
+ phylink_dbg(pl, "mac link %s\n", up ? "up" : "down");
}
EXPORT_SYMBOL_GPL(phylink_mac_change);
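+/* Fixed-link GPIO interrupt: any edge on the link GPIO simply kicks the
+ * resolver, which re-reads the pin and updates the link state.
+ */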
+static irqreturn_t phylink_link_handler(int irq, void *data)
+{
+ struct phylink *pl = data;
+
+ phylink_run_resolve(pl);
+
+ return IRQ_HANDLED;
+}
+
/**
* phylink_start() - start a phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -887,12 +949,13 @@ void phylink_start(struct phylink *pl)
{
ASSERT_RTNL();
- netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->link_an_mode),
- phy_modes(pl->link_config.interface));
+ phylink_info(pl, "configuring for %s/%s link mode\n",
+ phylink_an_mode_str(pl->link_an_mode),
+ phy_modes(pl->link_config.interface));
/* Always set the carrier off */
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
@@ -910,7 +973,22 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
- if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
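+ /* Prefer an interrupt on the link GPIO; fall back to polling
+ * once a second if no IRQ can be obtained.
+ */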
+ if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ int irq = gpiod_to_irq(pl->link_gpio);
+
+ if (irq > 0) {
+ if (!request_irq(irq, phylink_link_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "netdev link", pl))
+ pl->link_irq = irq;
+ else
+ irq = 0;
+ }
+ if (irq <= 0)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ }
+ if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->sfp_bus)
sfp_upstream_start(pl->sfp_bus);
@@ -936,8 +1014,11 @@ void phylink_stop(struct phylink *pl)
phy_stop(pl->phydev);
if (pl->sfp_bus)
sfp_upstream_stop(pl->sfp_bus);
- if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
- del_timer_sync(&pl->link_poll);
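+ /* The poll timer is stopped unconditionally (harmless if it was
+ * never armed), and the link IRQ is released if one was requested
+ * in phylink_start().
+ */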
+ del_timer_sync(&pl->link_poll);
+ if (pl->link_irq) {
+ free_irq(pl->link_irq, pl);
+ pl->link_irq = 0;
+ }
phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
}
@@ -1239,7 +1320,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
switch (pl->link_an_mode) {
case MLO_AN_PHY:
/* Silently mark the carrier down, and then trigger a resolve */
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
phylink_run_resolve(pl);
break;
@@ -1342,8 +1424,8 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee);
*
* FIXME: should deal with negotiation state too.
*/
-static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg,
- struct phylink_link_state *state, bool aneg)
+static int phylink_mii_emul_read(unsigned int reg,
+ struct phylink_link_state *state)
{
struct fixed_phy_status fs;
int val;
@@ -1358,8 +1440,6 @@ static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg,
if (reg == MII_BMSR) {
if (!state->an_complete)
val &= ~BMSR_ANEGCOMPLETE;
- if (!aneg)
- val &= ~BMSR_ANEGCAPABLE;
}
return val;
}
@@ -1455,8 +1535,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
- val = phylink_mii_emul_read(pl->netdev, reg, &state,
- true);
+ val = phylink_mii_emul_read(reg, &state);
}
break;
@@ -1469,8 +1548,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
if (val < 0)
return val;
- val = phylink_mii_emul_read(pl->netdev, reg, &state,
- true);
+ val = phylink_mii_emul_read(reg, &state);
}
break;
}
@@ -1573,6 +1651,20 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL_GPL(phylink_mii_ioctl);
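+/* With the netdev pointer removed from struct sfp_bus, publish the bus
+ * on the net_device via the upstream attach/detach callbacks instead.
+ */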
+static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus)
+{
+ struct phylink *pl = upstream;
+
+ pl->netdev->sfp_bus = bus;
+}
+
+static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
+{
+ struct phylink *pl = upstream;
+
+ pl->netdev->sfp_bus = NULL;
+}
+
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
@@ -1601,8 +1693,8 @@ static int phylink_sfp_module_insert(void *upstream,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
- netdev_err(pl->netdev, "validation with support %*pb failed: %d\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation with support %*pb failed: %d\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
@@ -1610,26 +1702,26 @@ static int phylink_sfp_module_insert(void *upstream,
iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
- netdev_err(pl->netdev,
- "selection of interface failed, advertisement %*pb\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
+ phylink_err(pl,
+ "selection of interface failed, advertisement %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
return -EINVAL;
}
config.interface = iface;
ret = phylink_validate(pl, support1, &config);
if (ret) {
- netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
- netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
@@ -1648,9 +1740,9 @@ static int phylink_sfp_module_insert(void *upstream,
changed = true;
- netdev_info(pl->netdev, "switched to %s/%s link mode\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface));
+ phylink_info(pl, "switched to %s/%s link mode\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface));
}
pl->link_port = port;
@@ -1694,6 +1786,8 @@ static void phylink_sfp_disconnect_phy(void *upstream)
}
static const struct sfp_upstream_ops sfp_phylink_ops = {
+ .attach = phylink_sfp_attach,
+ .detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
.link_up = phylink_sfp_link_up,
.link_down = phylink_sfp_link_down,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index e9c187946cca..b23fc41896ef 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -24,7 +24,6 @@ struct sfp_bus {
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
- struct net_device *netdev;
struct phy_device *phydev;
bool registered;
@@ -351,7 +350,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
- bus->netdev->sfp_bus = bus;
+ bus->upstream_ops->attach(bus->upstream, bus);
bus->registered = true;
return 0;
}
@@ -360,8 +359,8 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = bus->upstream_ops;
- bus->netdev->sfp_bus = NULL;
if (bus->registered) {
+ bus->upstream_ops->detach(bus->upstream, bus);
if (bus->started)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
@@ -443,13 +442,11 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
{
bus->upstream_ops = NULL;
bus->upstream = NULL;
- bus->netdev = NULL;
}
/**
* sfp_register_upstream() - Register the neighbouring device
* @fwnode: firmware node for the SFP bus
- * @ndev: network device associated with the interface
* @upstream: the upstream private data
* @ops: the upstream's &struct sfp_upstream_ops
*
@@ -460,7 +457,7 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
* On error, returns %NULL.
*/
struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- struct net_device *ndev, void *upstream,
+ void *upstream,
const struct sfp_upstream_ops *ops)
{
struct sfp_bus *bus = sfp_bus_get(fwnode);
@@ -470,7 +467,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
rtnl_lock();
bus->upstream_ops = ops;
bus->upstream = upstream;
- bus->netdev = ndev;
if (bus->sfp) {
ret = sfp_register_bus(bus);
@@ -592,7 +588,7 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
bus->sfp = sfp;
bus->socket_ops = ops;
- if (bus->netdev) {
+ if (bus->upstream_ops) {
ret = sfp_register_bus(bus);
if (ret)
sfp_socket_clear(bus);
@@ -612,7 +608,7 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- if (bus->netdev)
+ if (bus->upstream_ops)
sfp_unregister_bus(bus);
sfp_socket_clear(bus);
rtnl_unlock();
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 71812be0ac64..a991c80e6567 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -1798,6 +1799,7 @@ static void sfp_cleanup(void *data)
static int sfp_probe(struct platform_device *pdev)
{
const struct sff_data *sff;
+ struct i2c_adapter *i2c;
struct sfp *sfp;
bool poll = false;
int irq, err, i;
@@ -1817,7 +1819,6 @@ static int sfp_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *id;
- struct i2c_adapter *i2c;
struct device_node *np;
id = of_match_node(sfp_of_match, node);
@@ -1834,14 +1835,32 @@ static int sfp_probe(struct platform_device *pdev)
i2c = of_find_i2c_adapter_by_node(np);
of_node_put(np);
- if (!i2c)
- return -EPROBE_DEFER;
-
- err = sfp_i2c_configure(sfp, i2c);
- if (err < 0) {
- i2c_put_adapter(i2c);
- return err;
+ } else if (has_acpi_companion(&pdev->dev)) {
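+ /* On ACPI systems the I2C bus is referenced through an
+ * "i2c-bus" device property rather than an OF phandle.
+ */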
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct fwnode_handle *fw = acpi_fwnode_handle(adev);
+ struct fwnode_reference_args args;
+ struct acpi_handle *acpi_handle;
+ int ret;
+
+ ret = acpi_node_get_property_reference(fw, "i2c-bus", 0, &args);
+ if (ACPI_FAILURE(ret) || !is_acpi_device_node(args.fwnode)) {
+ dev_err(&pdev->dev, "missing 'i2c-bus' property\n");
+ return -ENODEV;
}
+
+ acpi_handle = ACPI_HANDLE_FWNODE(args.fwnode);
+ i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
+ } else {
+ return -EINVAL;
+ }
+
+ if (!i2c)
+ return -EPROBE_DEFER;
+
+ err = sfp_i2c_configure(sfp, i2c);
+ if (err < 0) {
+ i2c_put_adapter(i2c);
+ return err;
}
for (i = 0; i < GPIO_MAX; i++)
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 8ac33ca9ac3a..e89cdebae6f1 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1008,7 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
/* Any address will do - we take the first */
- const struct in_ifaddr *ifa = in_dev->ifa_list;
+ const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa) {
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
@@ -1103,7 +1103,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- struct in_ifaddr *ifa=in_dev->ifa_list;
+ const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 89984fcab01e..3f48f05dd2a6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3247,6 +3247,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
+ .ndo_fix_features = vmxnet3_fix_features,
.ndo_set_features = vmxnet3_set_features,
.ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
@@ -3651,13 +3652,19 @@ vmxnet3_suspend(struct device *device)
}
if (adapter->wol & WAKE_ARP) {
- in_dev = in_dev_get(netdev);
- if (!in_dev)
+ rcu_read_lock();
+
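+ /* ifa_list is RCU-protected; read it under rcu_read_lock()
+ * instead of holding a reference on in_dev.
+ */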
+ in_dev = __in_dev_get_rcu(netdev);
+ if (!in_dev) {
+ rcu_read_unlock();
goto skip_arp;
+ }
- ifa = (struct in_ifaddr *)in_dev->ifa_list;
- if (!ifa)
+ ifa = rcu_dereference(in_dev->ifa_list);
+ if (!ifa) {
+ rcu_read_unlock();
goto skip_arp;
+ }
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
sizeof(struct arphdr) + /* ARP header */
@@ -3677,7 +3684,9 @@ vmxnet3_suspend(struct device *device)
/* The Unicast IPv4 address in 'tip' field. */
arpreq += 2 * ETH_ALEN + sizeof(u32);
- *(u32 *)arpreq = ifa->ifa_address;
+ *(__be32 *)arpreq = ifa->ifa_address;
+
+ rcu_read_unlock();
/* The mask for the relevant bits. */
pmConf->filters[i].mask[0] = 0x00;
@@ -3686,7 +3695,6 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[3] = 0x00;
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
- in_dev_put(in_dev);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 559db051a500..0a38c76688ab 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -257,6 +257,16 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
+netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a2c554f8a61b..1cc1cd4aaa59 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,12 +69,12 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.17.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01041100
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -454,6 +454,9 @@ vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
+netdev_features_t
+vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);
+
int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5994d5415a03..75056b95b31f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -471,14 +471,19 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
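+/* Pick the FDB hash bucket: in COLLECT_METADATA mode a single device can
+ * carry many VNIs, so the VNI is folded into the hash as well.
+ */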
+static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+{
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
+ return eth_vni_hash(mac, vni);
+ else
+ return eth_hash(mac);
+}
+
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
- else
- return &vxlan->fdb_head[eth_hash(mac)];
+ return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
}
/* Look up Ethernet address in forwarding table */
@@ -593,8 +598,8 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
return -EINVAL;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
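+ /* hash_lock is now per bucket; only hold the lock for the
+ * chain currently being walked.
+ */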
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
@@ -602,14 +607,16 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
f, rdst,
extack);
if (rc)
- goto out;
+ goto unlock;
}
}
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ return 0;
-out:
- spin_unlock_bh(&vxlan->hash_lock);
+unlock:
+ spin_unlock_bh(&vxlan->hash_lock[h]);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@ -625,14 +632,15 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
return;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
if (f->vni == vni)
list_for_each_entry(rdst, &f->remotes, list)
rdst->offloaded = false;
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@ -1108,6 +1116,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
__be16 port;
__be32 src_vni, vni;
u32 ifindex;
+ u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -1126,12 +1135,13 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
true, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -1179,16 +1189,18 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
__be32 src_vni, vni;
__be16 port;
u32 ifindex;
+ u32 hash_index;
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
if (err)
return err;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -1300,8 +1312,10 @@ static bool vxlan_snoop(struct net_device *dev,
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
+ u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
+
/* learned new entry */
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[hash_index]);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@ -1312,7 +1326,7 @@ static bool vxlan_snoop(struct net_device *dev,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, true, NULL);
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[hash_index]);
}
return false;
@@ -2222,7 +2236,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
- if (likely(!IS_ERR(rt))) {
+ if (!IS_ERR(rt)) {
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n", &daddr);
ip_rt_put(rt);
@@ -2702,7 +2716,7 @@ static void vxlan_cleanup(struct timer_list *t)
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2724,7 +2738,7 @@ static void vxlan_cleanup(struct timer_list *t)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[h]);
}
mod_timer(&vxlan->age_timer, next_timer);
@@ -2767,12 +2781,13 @@ static int vxlan_init(struct net_device *dev)
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
struct vxlan_fdb *f;
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static void vxlan_uninit(struct net_device *dev)
@@ -2817,9 +2832,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2829,8 +2845,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f, true, true);
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
@@ -3014,7 +3030,6 @@ static void vxlan_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
INIT_LIST_HEAD(&vxlan->next);
- spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -3022,8 +3037,10 @@ static void vxlan_setup(struct net_device *dev)
gro_cells_init(&vxlan->gro_cells, dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h)
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_init(&vxlan->hash_lock[h]);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+ }
}
static void vxlan_ether_setup(struct net_device *dev)
@@ -3917,7 +3934,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
- spin_lock_bh(&vxlan->hash_lock);
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@ -3928,7 +3947,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
conf.remote_ifindex,
NTF_SELF, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
}
@@ -3940,7 +3959,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@ -4195,8 +4214,11 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4212,7 +4234,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static int
@@ -4221,11 +4243,13 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
+ u32 hash_index;
int err;
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@ -4235,7 +4259,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
false, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -4246,9 +4270,11 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ u32 hash_index;
int err = 0;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4262,7 +4288,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 61d8f6389c64..a030f5aa6b95 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -193,16 +193,15 @@ static int cisco_rx(struct sk_buff *skb)
mask = ~cpu_to_be32(0); /* is the mask correct? */
if (in_dev != NULL) {
- struct in_ifaddr **ifap = &in_dev->ifa_list;
+ const struct in_ifaddr *ifa;
- while (*ifap != NULL) {
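+ /* the address list is RCU-protected; pick the entry
+ * whose label matches this device
+ */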
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (strcmp(dev->name,
- (*ifap)->ifa_label) == 0) {
- addr = (*ifap)->ifa_local;
- mask = (*ifap)->ifa_mask;
+ ifa->ifa_label) == 0) {
+ addr = ifa->ifa_local;
+ mask = ifa->ifa_mask;
break;
}
- ifap = &(*ifap)->ifa_next;
}
cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5477a014e1fb..37cf602d8adf 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2194,13 +2194,13 @@ static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
if (!in_dev)
return 0;
- ifa = in_dev->ifa_list;
+ ifa = rtnl_dereference(in_dev->ifa_list);
memset(&ips, 0, sizeof(ips));
/* Configure IP addr only if IP address count < MAX_IP_ADDRS */
while (index < MAX_IP_ADDRS && ifa) {
ips[index] = ifa->ifa_local;
- ifa = ifa->ifa_next;
+ ifa = rtnl_dereference(ifa->ifa_next);
index++;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index e11a4bb67172..5a7cdb981789 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3268,7 +3268,7 @@ static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
if (!in_dev)
continue;
- ifa = in_dev->ifa_list;
+ ifa = rtnl_dereference(in_dev->ifa_list);
if (!ifa || !ifa->ifa_local)
continue;
ips[i] = ifa->ifa_local;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 783198844dd7..240f762b3749 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -633,7 +633,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
unsigned int rx_evtchn)
{
struct task_struct *task;
- int err = -ENOMEM;
+ int err;
BUG_ON(queue->tx_irq);
BUG_ON(queue->task);