Diffstat (limited to 'drivers')
-rw-r--r-- drivers/atm/idt77252.c | 2
-rw-r--r-- drivers/bus/mhi/pci_generic.c | 4
-rw-r--r-- drivers/char/pcmcia/synclink_cs.c | 23
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 8
-rw-r--r-- drivers/net/Kconfig | 2
-rw-r--r-- drivers/net/Makefile | 1
-rw-r--r-- drivers/net/appletalk/ipddp.c | 16
-rw-r--r-- drivers/net/bonding/bond_main.c | 74
-rw-r--r-- drivers/net/can/Kconfig | 3
-rw-r--r-- drivers/net/can/at91_can.c | 137
-rw-r--r-- drivers/net/can/dev/netlink.c | 9
-rw-r--r-- drivers/net/can/dev/rx-offload.c | 90
-rw-r--r-- drivers/net/can/flexcan.c | 127
-rw-r--r-- drivers/net/can/janz-ican3.c | 23
-rw-r--r-- drivers/net/can/m_can/m_can.c | 38
-rw-r--r-- drivers/net/can/m_can/m_can.h | 5
-rw-r--r-- drivers/net/can/m_can/m_can_platform.c | 16
-rw-r--r-- drivers/net/can/sja1000/peak_pci.c | 119
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 28
-rw-r--r-- drivers/net/can/ti_hecc.c | 2
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 12
-rw-r--r-- drivers/net/can/usb/etas_es58x/es581_4.c | 5
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_core.c | 82
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_core.h | 2
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_fd.c | 19
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb.c | 228
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 78
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 34
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 1
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 26
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_devlink.c | 114
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 6
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 1189
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_vl.c | 14
-rw-r--r-- drivers/net/eql.c | 24
-rw-r--r-- drivers/net/ethernet/3com/3c574_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 4
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r-- drivers/net/ethernet/8390/axnet_cs.c | 2
-rw-r--r-- drivers/net/ethernet/8390/pcnet_cs.c | 2
-rw-r--r-- drivers/net/ethernet/actions/owl-emac.c | 6
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 2
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 64
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 319
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 53
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 14
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 11
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/dnet.c | 2
-rw-r--r-- drivers/net/ethernet/ethoc.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r-- drivers/net/ethernet/fealnx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 530
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 216
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 44
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h | 19
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.c | 80
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.h | 31
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 25
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 148
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 154
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c | 155
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 3
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 13
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 372
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/regs.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 78
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 30
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 756
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 29
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 23
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 58
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 38
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 52
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 41
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 349
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_phy.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 17
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_tsn.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/jme.c | 2
-rw-r--r-- drivers/net/ethernet/korina.c | 2
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 10
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_main.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 12
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c | 161
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 73
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 47
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 200
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tir.h | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 45
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 753
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 144
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 32
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_common.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 24
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 106
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 35
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 620
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.h | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 79
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 333
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 51
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 20
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 133
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 63
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 5
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 28
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 3
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 14
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_if.h | 5
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 42
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_phc.c | 4
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 27
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 2
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 4
-rw-r--r-- drivers/net/ethernet/rocker/rocker.h | 3
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 9
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 19
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.c | 2
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 2
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 18
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 54
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 18
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
-rw-r--r-- drivers/net/fddi/skfp/skfddi.c | 19
-rw-r--r-- drivers/net/hamradio/baycom_epp.c | 9
-rw-r--r-- drivers/net/hamradio/baycom_par.c | 12
-rw-r--r-- drivers/net/hamradio/baycom_ser_fdx.c | 12
-rw-r--r-- drivers/net/hamradio/baycom_ser_hdx.c | 12
-rw-r--r-- drivers/net/hamradio/bpqether.c | 9
-rw-r--r-- drivers/net/hamradio/dmascc.c | 18
-rw-r--r-- drivers/net/hamradio/hdlcdrv.c | 20
-rw-r--r-- drivers/net/hamradio/scc.c | 13
-rw-r--r-- drivers/net/hamradio/yam.c | 19
-rw-r--r-- drivers/net/hippi/rrunner.c | 11
-rw-r--r-- drivers/net/hippi/rrunner.h | 3
-rw-r--r-- drivers/net/ipa/Makefile | 3
-rw-r--r-- drivers/net/ipa/gsi.c | 2
-rw-r--r-- drivers/net/ipa/gsi_trans.c | 34
-rw-r--r-- drivers/net/ipa/ipa.h | 2
-rw-r--r-- drivers/net/ipa/ipa_cmd.c | 51
-rw-r--r-- drivers/net/ipa/ipa_cmd.h | 22
-rw-r--r-- drivers/net/ipa/ipa_data-v4.11.c | 15
-rw-r--r-- drivers/net/ipa/ipa_data-v4.5.c | 2
-rw-r--r-- drivers/net/ipa/ipa_data-v4.9.c | 2
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 26
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 73
-rw-r--r-- drivers/net/ipa/ipa_interrupt.h | 8
-rw-r--r-- drivers/net/ipa/ipa_main.c | 96
-rw-r--r-- drivers/net/ipa/ipa_modem.c | 26
-rw-r--r-- drivers/net/ipa/ipa_modem.h | 4
-rw-r--r-- drivers/net/ipa/ipa_qmi.c | 6
-rw-r--r-- drivers/net/ipa/ipa_qmi.h | 19
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 12
-rw-r--r-- drivers/net/ipa/ipa_resource.c | 3
-rw-r--r-- drivers/net/ipa/ipa_smp2p.c | 5
-rw-r--r-- drivers/net/ipa/ipa_table.c | 40
-rw-r--r-- drivers/net/ipa/ipa_table.h | 16
-rw-r--r-- drivers/net/ipa/ipa_uc.c | 52
-rw-r--r-- drivers/net/ipa/ipa_uc.h | 22
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 1
-rw-r--r-- drivers/net/macvlan.c | 8
-rw-r--r-- drivers/net/mctp/Kconfig | 8
-rw-r--r-- drivers/net/mctp/Makefile | 0
-rw-r--r-- drivers/net/mhi/net.c | 15
-rw-r--r-- drivers/net/mhi/proto_mbim.c | 8
-rw-r--r-- drivers/net/netdevsim/bus.c | 17
-rw-r--r-- drivers/net/netdevsim/dev.c | 4
-rw-r--r-- drivers/net/netdevsim/netdev.c | 6
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r-- drivers/net/phy/Kconfig | 6
-rw-r--r-- drivers/net/phy/Makefile | 1
-rw-r--r-- drivers/net/phy/at803x.c | 18
-rw-r--r-- drivers/net/phy/dp83822.c | 8
-rw-r--r-- drivers/net/phy/intel-xway.c | 76
-rw-r--r-- drivers/net/phy/marvell10g.c | 89
-rw-r--r-- drivers/net/phy/mxl-gpy.c | 727
-rw-r--r-- drivers/net/phy/phy.c | 4
-rw-r--r-- drivers/net/phy/phy_device.c | 14
-rw-r--r-- drivers/net/phy/phylink.c | 21
-rw-r--r-- drivers/net/plip/plip.c | 12
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 6
-rw-r--r-- drivers/net/sb1000.c | 20
-rw-r--r-- drivers/net/slip/slip.c | 13
-rw-r--r-- drivers/net/usb/asix_devices.c | 12
-rw-r--r-- drivers/net/usb/ax88172a.c | 2
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 2
-rw-r--r-- drivers/net/usb/cdc-phonet.c | 5
-rw-r--r-- drivers/net/usb/dm9601.c | 2
-rw-r--r-- drivers/net/usb/hso.c | 6
-rw-r--r-- drivers/net/usb/lan78xx.c | 2
-rw-r--r-- drivers/net/usb/mcs7830.c | 2
-rw-r--r-- drivers/net/usb/pegasus.c | 5
-rw-r--r-- drivers/net/usb/r8152.c | 2
-rw-r--r-- drivers/net/usb/rtl8150.c | 5
-rw-r--r-- drivers/net/usb/smsc75xx.c | 2
-rw-r--r-- drivers/net/usb/smsc95xx.c | 2
-rw-r--r-- drivers/net/usb/sr9700.c | 2
-rw-r--r-- drivers/net/usb/sr9800.c | 2
-rw-r--r-- drivers/net/veth.c | 307
-rw-r--r-- drivers/net/vmxnet3/Makefile | 2
-rw-r--r-- drivers/net/vmxnet3/upt1_defs.h | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_defs.h | 50
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 221
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 20
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 22
-rw-r--r-- drivers/net/wan/c101.c | 33
-rw-r--r-- drivers/net/wan/cosa.c | 15
-rw-r--r-- drivers/net/wan/farsync.c | 123
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 19
-rw-r--r-- drivers/net/wan/hdlc.c | 9
-rw-r--r-- drivers/net/wan/hdlc_cisco.c | 14
-rw-r--r-- drivers/net/wan/hdlc_fr.c | 40
-rw-r--r-- drivers/net/wan/hdlc_ppp.c | 8
-rw-r--r-- drivers/net/wan/hdlc_raw.c | 14
-rw-r--r-- drivers/net/wan/hdlc_raw_eth.c | 14
-rw-r--r-- drivers/net/wan/hdlc_x25.c | 16
-rw-r--r-- drivers/net/wan/hostess_sv11.c | 7
-rw-r--r-- drivers/net/wan/ixp4xx_hss.c | 22
-rw-r--r-- drivers/net/wan/lmc/lmc.h | 2
-rw-r--r-- drivers/net/wan/lmc/lmc_main.c | 33
-rw-r--r-- drivers/net/wan/lmc/lmc_proto.c | 7
-rw-r--r-- drivers/net/wan/lmc/lmc_proto.h | 1
-rw-r--r-- drivers/net/wan/n2.c | 32
-rw-r--r-- drivers/net/wan/pc300too.c | 44
-rw-r--r-- drivers/net/wan/pci200syn.c | 32
-rw-r--r-- drivers/net/wan/sbni.c | 15
-rw-r--r-- drivers/net/wan/sealevel.c | 10
-rw-r--r-- drivers/net/wan/wanxl.c | 21
-rw-r--r-- drivers/net/wireless/cisco/airo.c | 15
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap.h | 3
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_ioctl.c | 30
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_main.c | 3
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_pcie.c | 19
-rw-r--r-- drivers/nfc/fdp/fdp.c | 38
-rw-r--r-- drivers/nfc/fdp/fdp.h | 4
-rw-r--r-- drivers/nfc/fdp/i2c.c | 8
-rw-r--r-- drivers/nfc/mei_phy.c | 4
-rw-r--r-- drivers/nfc/mei_phy.h | 2
-rw-r--r-- drivers/nfc/microread/i2c.c | 4
-rw-r--r-- drivers/nfc/microread/microread.c | 14
-rw-r--r-- drivers/nfc/microread/microread.h | 6
-rw-r--r-- drivers/nfc/nfcmrvl/fw_dnld.c | 16
-rw-r--r-- drivers/nfc/nfcmrvl/i2c.c | 4
-rw-r--r-- drivers/nfc/nfcmrvl/main.c | 6
-rw-r--r-- drivers/nfc/nfcmrvl/nfcmrvl.h | 6
-rw-r--r-- drivers/nfc/nfcmrvl/spi.c | 4
-rw-r--r-- drivers/nfc/nfcmrvl/uart.c | 4
-rw-r--r-- drivers/nfc/nfcmrvl/usb.c | 2
-rw-r--r-- drivers/nfc/nfcsim.c | 4
-rw-r--r-- drivers/nfc/nxp-nci/core.c | 2
-rw-r--r-- drivers/nfc/pn533/pn533.c | 2
-rw-r--r-- drivers/nfc/pn544/i2c.c | 2
-rw-r--r-- drivers/nfc/pn544/pn544.c | 17
-rw-r--r-- drivers/nfc/pn544/pn544.h | 7
-rw-r--r-- drivers/nfc/port100.c | 47
-rw-r--r-- drivers/nfc/s3fwrn5/core.c | 7
-rw-r--r-- drivers/nfc/s3fwrn5/firmware.c | 12
-rw-r--r-- drivers/nfc/s3fwrn5/nci.c | 8
-rw-r--r-- drivers/nfc/s3fwrn5/nci.h | 2
-rw-r--r-- drivers/nfc/st-nci/core.c | 5
-rw-r--r-- drivers/nfc/st-nci/i2c.c | 2
-rw-r--r-- drivers/nfc/st-nci/ndlc.c | 6
-rw-r--r-- drivers/nfc/st-nci/ndlc.h | 8
-rw-r--r-- drivers/nfc/st-nci/spi.c | 2
-rw-r--r-- drivers/nfc/st-nci/vendor_cmds.c | 2
-rw-r--r-- drivers/nfc/st21nfca/core.c | 6
-rw-r--r-- drivers/nfc/st21nfca/i2c.c | 6
-rw-r--r-- drivers/nfc/st21nfca/st21nfca.h | 4
-rw-r--r-- drivers/nfc/st21nfca/vendor_cmds.c | 2
-rw-r--r-- drivers/nfc/st95hf/core.c | 2
-rw-r--r-- drivers/nfc/trf7970a.c | 19
-rw-r--r-- drivers/nfc/virtual_ncidev.c | 4
-rw-r--r-- drivers/s390/cio/ccwgroup.c | 22
-rw-r--r-- drivers/s390/net/Kconfig | 9
-rw-r--r-- drivers/s390/net/qeth_core.h | 51
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 189
-rw-r--r-- drivers/s390/net/qeth_core_mpc.c | 3
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 23
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 5
-rw-r--r-- drivers/s390/net/qeth_ethtool.c | 7
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 175
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 19
-rw-r--r-- drivers/staging/octeon/ethernet.c | 12
-rw-r--r-- drivers/staging/rtl8188eu/include/osdep_intf.h | 2
-rw-r--r-- drivers/staging/rtl8188eu/include/rtw_android.h | 3
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 3
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/os_intfs.c | 1
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/rtw_android.c | 14
-rw-r--r-- drivers/staging/rtl8723bs/include/osdep_intf.h | 2
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 18
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/os_intfs.c | 1
-rw-r--r-- drivers/staging/wlan-ng/p80211netdev.c | 76
-rw-r--r-- drivers/tty/synclink_gt.c | 19
456 files changed, 9615 insertions, 4969 deletions
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 9e4bd751db79..81ce81a75fc6 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3536,7 +3536,7 @@ static int idt77252_preset(struct idt77252_dev *card)
return -1;
}
if (!(pci_command & PCI_COMMAND_IO)) {
- printk("%s: PCI_COMMAND: %04x (???)\n",
+ printk("%s: PCI_COMMAND: %04x (?)\n",
card->name, pci_command);
deinit_card(card);
return (-1);
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 4dd1077354af..b33b9d75e8af 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -32,6 +32,7 @@
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
*/
@@ -42,6 +43,7 @@ struct mhi_pci_dev_info {
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int mru_default;
bool sideband_wake;
};
@@ -272,6 +274,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -664,6 +667,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+ mhi_cntrl->mru = info->mru_default;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 6eaefea0520e..5ac53dcb3a6a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4050,16 +4050,15 @@ static int hdlcdev_close(struct net_device *dev)
* called by network layer to process IOCTL call to network device
*
* dev pointer to network device structure
- * ifr pointer to network interface request structure
- * cmd IOCTL command code
+ * ifs pointer to network interface settings structure
*
* returns 0 if success, otherwise error code
*/
-static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdev_wan_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
MGSLPC_INFO *info = dev_to_port(dev);
unsigned int flags;
@@ -4070,17 +4069,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (info->port.count)
return -EBUSY;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
memset(&new_line, 0, size);
- switch(ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
@@ -4148,9 +4144,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
tty_kref_put(tty);
}
return 0;
-
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -4225,7 +4220,7 @@ static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_siocwandev = hdlcdev_wan_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index abf60f4d9203..0aa8629fdf62 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1745,10 +1745,10 @@ static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- if (!priv->rn_ops->ndo_do_ioctl)
+ if (!priv->rn_ops->ndo_eth_ioctl)
return -EOPNOTSUPP;
- return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
+ return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
}
static int ipoib_dev_init(struct net_device *dev)
@@ -2078,7 +2078,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
- .ndo_do_ioctl = ipoib_ioctl,
+ .ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -2093,7 +2093,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
.ndo_get_stats64 = ipoib_get_stats,
- .ndo_do_ioctl = ipoib_ioctl,
+ .ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_default_pf = {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6977f8248df7..56213a8a1ec5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,6 +483,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/mctp/Kconfig"
+
source "drivers/net/mdio/Kconfig"
source "drivers/net/pcs/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7ffd2d03efaf..a48a664605a3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_WWAN) += wwan/
+obj-$(CONFIG_MCTP) += mctp/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 51cf5eca9c7f..5566daefbff4 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -54,11 +54,12 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb,
static int ipddp_create(struct ipddp_route *new_rt);
static int ipddp_delete(struct ipddp_route *rt);
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
-static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static const struct net_device_ops ipddp_netdev_ops = {
.ndo_start_xmit = ipddp_xmit,
- .ndo_do_ioctl = ipddp_ioctl,
+ .ndo_siocdevprivate = ipddp_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -268,15 +269,18 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
return NULL;
}
-static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- struct ipddp_route __user *rt = ifr->ifr_data;
struct ipddp_route rcp, rcp2, *rp;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
if(!capable(CAP_NET_ADMIN))
return -EPERM;
- if(copy_from_user(&rcp, rt, sizeof(rcp)))
+ if (copy_from_user(&rcp, data, sizeof(rcp)))
return -EFAULT;
switch(cmd)
@@ -296,7 +300,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
- if (copy_to_user(rt, &rcp2,
+ if (copy_to_user(data, &rcp2,
sizeof(struct ipddp_route)))
return -EFAULT;
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 31730efa7538..bec8ceaff98f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -732,7 +732,7 @@ static int bond_check_dev_link(struct bonding *bond,
BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_do_ioctl;
+ ioctl = slave_ops->ndo_eth_ioctl;
if (ioctl) {
/* TODO: set pointer to correct ioctl on a per team member
* bases to make this more efficient. that is, once
@@ -756,7 +756,7 @@ static int bond_check_dev_link(struct bonding *bond,
}
}
- /* If reporting, report that either there's no dev->do_ioctl,
+ /* If reporting, report that either there's no ndo_eth_ioctl,
* or both SIOCGMIIREG and get_link failed (meaning that we
* cannot report link status). If not reporting, pretend
* we're ok.
@@ -1733,7 +1733,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_do_ioctl == NULL) {
+ slave_ops->ndo_eth_ioctl == NULL) {
slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
}
@@ -3962,20 +3962,13 @@ static void bond_get_stats(struct net_device *bond_dev,
rcu_read_unlock();
}
-static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
+static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device *slave_dev = NULL;
- struct ifbond k_binfo;
- struct ifbond __user *u_binfo = NULL;
- struct ifslave k_sinfo;
- struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
- struct bond_opt_value newval;
- struct net *net;
- int res = 0;
+ int res;
- netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
+ netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
switch (cmd) {
case SIOCGMIIPHY:
@@ -4000,7 +3993,28 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
}
return 0;
- case BOND_INFO_QUERY_OLD:
+ default:
+ res = -EOPNOTSUPP;
+ }
+
+ return res;
+}
+
+static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct net_device *slave_dev = NULL;
+ struct ifbond k_binfo;
+ struct ifbond __user *u_binfo = NULL;
+ struct ifslave k_sinfo;
+ struct ifslave __user *u_sinfo = NULL;
+ struct bond_opt_value newval;
+ struct net *net;
+ int res = 0;
+
+ netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
+
+ switch (cmd) {
case SIOCBONDINFOQUERY:
u_binfo = (struct ifbond __user *)ifr->ifr_data;
@@ -4012,7 +4026,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return -EFAULT;
return 0;
- case BOND_SLAVE_INFO_QUERY_OLD:
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;
@@ -4042,19 +4055,15 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return -ENODEV;
switch (cmd) {
- case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
res = bond_enslave(bond_dev, slave_dev, NULL);
break;
- case BOND_RELEASE_OLD:
case SIOCBONDRELEASE:
res = bond_release(bond_dev, slave_dev);
break;
- case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
res = bond_set_dev_addr(bond_dev, slave_dev);
break;
- case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
bond_opt_initstr(&newval, slave_dev->name);
res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
@@ -4067,6 +4076,29 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
+ void __user *data, int cmd)
+{
+ struct ifreq ifrdata = { .ifr_data = data };
+
+ switch (cmd) {
+ case BOND_INFO_QUERY_OLD:
+ return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
+ case BOND_SLAVE_INFO_QUERY_OLD:
+ return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
+ case BOND_ENSLAVE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
+ case BOND_RELEASE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
+ case BOND_SETHWADDR_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
+ case BOND_CHANGE_ACTIVE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -4955,7 +4987,9 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_start_xmit = bond_start_xmit,
.ndo_select_queue = bond_select_queue,
.ndo_get_stats64 = bond_get_stats,
- .ndo_do_ioctl = bond_do_ioctl,
+ .ndo_eth_ioctl = bond_eth_ioctl,
+ .ndo_siocbond = bond_do_ioctl,
+ .ndo_siocdevprivate = bond_siocdevprivate,
.ndo_change_rx_flags = bond_change_rx_flags,
.ndo_set_rx_mode = bond_set_rx_mode,
.ndo_change_mtu = bond_change_mtu,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e355d3974977..fff259247d52 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -97,7 +97,8 @@ config CAN_AT91
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on OF && HAS_IOMEM
+ depends on OF || COLDFIRE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say Y here if you want to support for Freescale FlexCAN.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 04d0bb3ffe89..b06af90a9964 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -43,14 +43,14 @@ enum at91_reg {
};
/* Mailbox registers (0 <= i <= 15) */
-#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
-#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
-#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
-#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
-#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
-#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
-#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
-#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
+#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20)))
+#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20)))
+#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20)))
+#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20)))
+#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20)))
+#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20)))
+#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20)))
+#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20)))
/* Register bits */
#define AT91_MR_CANEN BIT(0)
@@ -87,19 +87,19 @@ enum at91_mb_mode {
};
/* Interrupt mask bits */
-#define AT91_IRQ_ERRA (1 << 16)
-#define AT91_IRQ_WARN (1 << 17)
-#define AT91_IRQ_ERRP (1 << 18)
-#define AT91_IRQ_BOFF (1 << 19)
-#define AT91_IRQ_SLEEP (1 << 20)
-#define AT91_IRQ_WAKEUP (1 << 21)
-#define AT91_IRQ_TOVF (1 << 22)
-#define AT91_IRQ_TSTP (1 << 23)
-#define AT91_IRQ_CERR (1 << 24)
-#define AT91_IRQ_SERR (1 << 25)
-#define AT91_IRQ_AERR (1 << 26)
-#define AT91_IRQ_FERR (1 << 27)
-#define AT91_IRQ_BERR (1 << 28)
+#define AT91_IRQ_ERRA BIT(16)
+#define AT91_IRQ_WARN BIT(17)
+#define AT91_IRQ_ERRP BIT(18)
+#define AT91_IRQ_BOFF BIT(19)
+#define AT91_IRQ_SLEEP BIT(20)
+#define AT91_IRQ_WAKEUP BIT(21)
+#define AT91_IRQ_TOVF BIT(22)
+#define AT91_IRQ_TSTP BIT(23)
+#define AT91_IRQ_CERR BIT(24)
+#define AT91_IRQ_SERR BIT(25)
+#define AT91_IRQ_AERR BIT(26)
+#define AT91_IRQ_FERR BIT(27)
+#define AT91_IRQ_BERR BIT(28)
#define AT91_IRQ_ERR_ALL (0x1fff0000)
#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
@@ -163,7 +163,7 @@ static const struct can_bittiming_const at91_bittiming_const = {
.tseg2_min = 2,
.tseg2_max = 8,
.sjw_max = 4,
- .brp_min = 2,
+ .brp_min = 2,
.brp_max = 128,
.brp_inc = 1,
};
@@ -281,19 +281,20 @@ static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
- u32 value)
+ u32 value)
{
writel_relaxed(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
- unsigned int mb, enum at91_mb_mode mode, int prio)
+ unsigned int mb, enum at91_mb_mode mode,
+ int prio)
{
at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
- enum at91_mb_mode mode)
+ enum at91_mb_mode mode)
{
set_mb_mode_prio(priv, mb, mode, 0);
}
@@ -316,8 +317,7 @@ static void at91_setup_mailboxes(struct net_device *dev)
unsigned int i;
u32 reg_mid;
- /*
- * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+ /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
* mailbox is disabled. The next 11 mailboxes are used as a
* reception FIFO. The last mailbox is configured with
* overwrite option. The overwrite flag indicates a FIFO
@@ -368,7 +368,7 @@ static int at91_set_bittiming(struct net_device *dev)
}
static int at91_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_ecr = at91_read(priv, AT91_ECR);
@@ -423,8 +423,7 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
priv->can.state = state;
}
-/*
- * theory of operation:
+/* theory of operation:
*
* According to the datasheet priority 0 is the highest priority, 15
* is the lowest. If two mailboxes have the same priority level the
@@ -486,8 +485,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
- /*
- * we have to stop the queue and deliver all messages in case
+ /* we have to stop the queue and deliver all messages in case
* of a prio+mb counter wrap around. This is the case if
* tx_next buffer prio and mailbox equals 0.
*
@@ -515,6 +513,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
u32 mask = get_mb_rx_low_mask(priv);
+
at91_write(priv, AT91_TCR, mask);
}
@@ -526,9 +525,10 @@ static inline void at91_activate_rx_low(const struct at91_priv *priv)
* Reenables given mailbox for reception of new CAN messages
*/
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
- unsigned int mb)
+ unsigned int mb)
{
u32 mask = 1 << mb;
+
at91_write(priv, AT91_TCR, mask);
}
@@ -568,7 +568,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
* given can frame. "mb" and "cf" must be valid.
*/
static void at91_read_mb(struct net_device *dev, unsigned int mb,
- struct can_frame *cf)
+ struct can_frame *cf)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_msr, reg_mid;
@@ -582,9 +582,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
reg_msr = at91_read(priv, AT91_MSR(mb));
cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);
- if (reg_msr & AT91_MSR_MRTR)
+ if (reg_msr & AT91_MSR_MRTR) {
cf->can_id |= CAN_RTR_FLAG;
- else {
+ } else {
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
}
@@ -685,7 +685,7 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > get_mb_rx_low_last(priv) &&
reg_sr & get_mb_rx_low_mask(priv))
netdev_info(dev,
- "order of incoming frames cannot be guaranteed\n");
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
@@ -718,7 +718,7 @@ static int at91_poll_rx(struct net_device *dev, int quota)
}
static void at91_poll_err_frame(struct net_device *dev,
- struct can_frame *cf, u32 reg_sr)
+ struct can_frame *cf, u32 reg_sr)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -796,8 +796,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
if (reg_sr & get_irq_mb_rx(priv))
work_done += at91_poll_rx(dev, quota - work_done);
- /*
- * The error bits are clear on read,
+ /* The error bits are clear on read,
* so use saved value from irq handler.
*/
reg_sr |= priv->reg_sr;
@@ -807,6 +806,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
if (work_done < quota) {
/* enable IRQs for frame errors and all mailboxes >= rx_next */
u32 reg_ier = AT91_IRQ_ERR_FRAME;
+
reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
napi_complete_done(napi, work_done);
@@ -816,8 +816,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
return work_done;
}
-/*
- * theory of operation:
+/* theory of operation:
*
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
@@ -846,8 +845,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
/* Disable irq for this TX mailbox */
at91_write(priv, AT91_IDR, 1 << mb);
- /*
- * only echo if mailbox signals us a transfer
+ /* only echo if mailbox signals us a transfer
* complete (MSR_MRDY). Otherwise it's a tansfer
* abort. "can_bus_off()" takes care about the skbs
* parked in the echo queue.
@@ -862,8 +860,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
}
}
- /*
- * restart queue if we don't have a wrap around but restart if
+ /* restart queue if we don't have a wrap around but restart if
* we get a TX int for the last can frame directly before a
* wrap around.
*/
@@ -873,7 +870,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
}
static void at91_irq_err_state(struct net_device *dev,
- struct can_frame *cf, enum can_state new_state)
+ struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
u32 reg_idr = 0, reg_ier = 0;
@@ -883,8 +880,7 @@ static void at91_irq_err_state(struct net_device *dev,
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
- /*
- * from: ERROR_ACTIVE
+ /* from: ERROR_ACTIVE
* to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
* => : there was a warning int
*/
@@ -900,8 +896,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
fallthrough;
case CAN_STATE_ERROR_WARNING:
- /*
- * from: ERROR_ACTIVE, ERROR_WARNING
+ /* from: ERROR_ACTIVE, ERROR_WARNING
* to : ERROR_PASSIVE, BUS_OFF
* => : error passive int
*/
@@ -917,8 +912,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
break;
case CAN_STATE_BUS_OFF:
- /*
- * from: BUS_OFF
+ /* from: BUS_OFF
* to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
*/
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
@@ -935,12 +929,10 @@ static void at91_irq_err_state(struct net_device *dev,
break;
}
-
/* process state changes depending on the new state */
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
- /*
- * actually we want to enable AT91_IRQ_WARN here, but
+ /* actually we want to enable AT91_IRQ_WARN here, but
* it screws up the system under certain
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
@@ -983,7 +975,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
static int at91_get_state_by_bec(const struct net_device *dev,
- enum can_state *state)
+ enum can_state *state)
{
struct can_berr_counter bec;
int err;
@@ -1004,7 +996,6 @@ static int at91_get_state_by_bec(const struct net_device *dev,
return 0;
}
-
static void at91_irq_err(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -1018,15 +1009,15 @@ static void at91_irq_err(struct net_device *dev)
reg_sr = at91_read(priv, AT91_SR);
/* we need to look at the unmasked reg_sr */
- if (unlikely(reg_sr & AT91_IRQ_BOFF))
+ if (unlikely(reg_sr & AT91_IRQ_BOFF)) {
new_state = CAN_STATE_BUS_OFF;
- else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+ } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) {
new_state = CAN_STATE_ERROR_PASSIVE;
- else if (unlikely(reg_sr & AT91_IRQ_WARN))
+ } else if (unlikely(reg_sr & AT91_IRQ_WARN)) {
new_state = CAN_STATE_ERROR_WARNING;
- else if (likely(reg_sr & AT91_IRQ_ERRA))
+ } else if (likely(reg_sr & AT91_IRQ_ERRA)) {
new_state = CAN_STATE_ERROR_ACTIVE;
- else {
+ } else {
netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1053,8 +1044,7 @@ static void at91_irq_err(struct net_device *dev)
priv->can.state = new_state;
}
-/*
- * interrupt handler
+/* interrupt handler
*/
static irqreturn_t at91_irq(int irq, void *dev_id)
{
@@ -1075,8 +1065,7 @@ static irqreturn_t at91_irq(int irq, void *dev_id)
/* Receive or error interrupt? -> napi */
if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
- /*
- * The error bits are clear on read,
+ /* The error bits are clear on read,
* save for later use.
*/
priv->reg_sr = reg_sr;
@@ -1133,8 +1122,7 @@ static int at91_open(struct net_device *dev)
return err;
}
-/*
- * stop CAN bus activity
+/* stop CAN bus activity
*/
static int at91_close(struct net_device *dev)
{
@@ -1176,8 +1164,8 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
-static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mb0_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
@@ -1187,8 +1175,9 @@ static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}
-static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t mb0_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct net_device *ndev = to_net_dev(dev);
struct at91_priv *priv = netdev_priv(ndev);
@@ -1222,7 +1211,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
return ret;
}
-static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+static DEVICE_ATTR_RW(mb0_id);
static struct attribute *at91_sysfs_attrs[] = {
&dev_attr_mb0_id.attr,
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index e38c2566aff4..147c23d7dab7 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -47,7 +47,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ if (!is_can_fd)
return -EOPNOTSUPP;
}
@@ -132,10 +132,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->ctrlmode |= maskedflags;
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ if (priv->ctrlmode & CAN_CTRLMODE_FD) {
dev->mtu = CANFD_MTU;
- else
+ } else {
dev->mtu = CAN_MTU;
+ memset(&priv->data_bittiming, 0,
+ sizeof(priv->data_bittiming));
+ }
}
if (data[IFLA_CAN_RESTART_MS]) {
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index ab2c1543786c..37b0cc65237b 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
* David Jander
- * Copyright (C) 2014-2017 Pengutronix,
+ * Copyright (C) 2014-2021 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
*/
@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 pending)
{
- struct sk_buff_head skb_queue;
unsigned int i;
-
- __skb_queue_head_init(&skb_queue);
+ int received = 0;
for (i = offload->mb_first;
can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
if (IS_ERR_OR_NULL(skb))
continue;
- __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
- }
-
- if (!skb_queue_empty(&skb_queue)) {
- unsigned long flags;
- u32 queue_len;
-
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- queue_len = skb_queue_len(&offload->skb_queue);
- if (queue_len > offload->skb_queue_len_max / 8)
- netdev_dbg(offload->dev, "%s: queue_len=%d\n",
- __func__, queue_len);
-
- can_rx_offload_schedule(offload);
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
+ received++;
}
- return skb_queue_len(&skb_queue);
+ return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
if (!skb)
break;
- skb_queue_tail(&offload->skb_queue, skb);
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
received++;
}
- if (received)
- can_rx_offload_schedule(offload);
-
return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
- unsigned long flags;
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- can_rx_offload_schedule(offload);
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
return 0;
}
@@ -295,13 +272,56 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
return -ENOBUFS;
}
- skb_queue_tail(&offload->skb_queue, skb);
- can_rx_offload_schedule(offload);
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ napi_schedule(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ local_bh_disable();
+ napi_schedule(&offload->napi);
+ local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
+
static int can_rx_offload_init_queue(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight)
@@ -312,6 +332,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
+ __skb_queue_head_init(&offload->skb_irq_queue);
netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
@@ -373,5 +394,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
{
netif_napi_del(&offload->napi);
skb_queue_purge(&offload->skb_queue);
+ __skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 57f3635ad8d7..54ffb796a320 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/can/platform/flexcan.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -208,18 +209,19 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
* Filter? connected? Passive detection ption in MB Supported?
- * MX25 FlexCAN2 03.00.00.00 no no no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no no
- * MX35 FlexCAN2 03.00.00.00 no no no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no
- * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes
- * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
- * VF610 FlexCAN3 ? no yes no yes yes? no
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
- * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes
+ * MCF5441X FlexCAN2 ? no yes no no yes no 16
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
+ * VF610 FlexCAN3 ? no yes no yes yes? no 64
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -246,6 +248,10 @@
#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
/* Setup stop mode with SCU firmware to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+/* Setup 3 separate interrupts, main, boff and err */
+#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
+/* Setup 16 mailboxes */
+#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -363,6 +369,9 @@ struct flexcan_priv {
struct regulator *reg_xceiver;
struct flexcan_stop_mode stm;
+ int irq_boff;
+ int irq_err;
+
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
@@ -371,6 +380,11 @@ struct flexcan_priv {
void (*write)(u32 val, void __iomem *addr);
};
+static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16,
+};
+
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
@@ -637,13 +651,17 @@ static int flexcan_clks_enable(const struct flexcan_priv *priv)
{
int err;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
- return err;
+ if (priv->clk_ipg) {
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+ }
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- clk_disable_unprepare(priv->clk_ipg);
+ if (priv->clk_per) {
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ clk_disable_unprepare(priv->clk_ipg);
+ }
return err;
}
@@ -1198,6 +1216,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
+ if (handled)
+ can_rx_offload_irq_finish(&priv->offload);
+
return handled;
}
@@ -1401,8 +1422,12 @@ static int flexcan_rx_offload_setup(struct net_device *dev)
priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
else
priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
- (sizeof(priv->regs->mb[1]) / priv->mb_size);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16)
+ priv->mb_count = 16;
+ else
+ priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+ (sizeof(priv->regs->mb[1]) / priv->mb_size);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
priv->tx_mb_reserved =
@@ -1774,6 +1799,18 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_can_rx_offload_disable;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ err = request_irq(priv->irq_boff,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq;
+
+ err = request_irq(priv->irq_err,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_boff;
+ }
+
flexcan_chip_interrupts_enable(dev);
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -1782,6 +1819,10 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_free_irq_boff:
+ free_irq(priv->irq_boff, dev);
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop(dev);
@@ -1803,6 +1844,12 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ free_irq(priv->irq_err, dev);
+ free_irq(priv->irq_boff, dev);
+ }
+
free_irq(dev->irq, dev);
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop_disable_on_error(dev);
@@ -2039,14 +2086,26 @@ static const struct of_device_id flexcan_of_match[] = {
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
+static const struct platform_device_id flexcan_id_table[] = {
+ {
+ .name = "flexcan-mcf5441x",
+ .driver_data = (kernel_ulong_t)&fsl_mcf5441x_devtype_data,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
+
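With the id_table above, a ColdFire platform can bind this driver without devicetree by registering a "flexcan-mcf5441x" platform device that carries struct flexcan_platform_data. A hypothetical board-file sketch follows; the base address, IRQ numbers and clock rate are invented for illustration, and only the pdata fields actually read by flexcan_probe() are real:

#include <linux/can/platform/flexcan.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct flexcan_platform_data mcf5441x_flexcan_pdata = {
	.clock_frequency = 125000000,	/* CAN clock in Hz */
	.clk_src = 1,			/* peripheral clock as source */
};

static struct resource mcf5441x_flexcan_res[] = {
	DEFINE_RES_MEM(0xfc020000, 0x1000),	/* register window */
	DEFINE_RES_IRQ(128),			/* main (mailbox) IRQ */
	DEFINE_RES_IRQ(129),			/* bus-off IRQ */
	DEFINE_RES_IRQ(130),			/* error IRQ */
};

static struct platform_device mcf5441x_flexcan_dev = {
	.name		= "flexcan-mcf5441x",
	.id		= 0,
	.resource	= mcf5441x_flexcan_res,
	.num_resources	= ARRAY_SIZE(mcf5441x_flexcan_res),
	.dev.platform_data = &mcf5441x_flexcan_pdata,
};

/* e.g. from board init code: platform_device_register(&mcf5441x_flexcan_dev); */

Because fsl_mcf5441x_devtype_data sets FLEXCAN_QUIRK_NR_IRQ_3, the probe shown below fetches the bus-off and error lines from IRQ resources 1 and 2, which is why three IRQ resources are listed here.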
static int flexcan_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
+ struct flexcan_platform_data *pdata;
int err, irq;
u8 clk_src = 1;
u32 clock_freq = 0;
@@ -2064,6 +2123,12 @@ static int flexcan_probe(struct platform_device *pdev)
"clock-frequency", &clock_freq);
of_property_read_u8(pdev->dev.of_node,
"fsl,clk-source", &clk_src);
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ clock_freq = pdata->clock_frequency;
+ clk_src = pdata->clk_src;
+ }
}
if (!clock_freq) {
@@ -2089,7 +2154,14 @@ static int flexcan_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- devtype_data = of_device_get_match_data(&pdev->dev);
+ of_id = of_match_device(flexcan_of_match, &pdev->dev);
+ if (of_id)
+ devtype_data = of_id->data;
+ else if (platform_get_device_id(pdev)->driver_data)
+ devtype_data = (struct flexcan_devtype_data *)
+ platform_get_device_id(pdev)->driver_data;
+ else
+ return -ENODEV;
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
@@ -2133,6 +2205,19 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ priv->irq_boff = platform_get_irq(pdev, 1);
+ if (priv->irq_boff <= 0) {
+ err = -ENODEV;
+ goto failed_platform_get_irq;
+ }
+ priv->irq_err = platform_get_irq(pdev, 2);
+ if (priv->irq_err <= 0) {
+ err = -ENODEV;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
@@ -2170,6 +2255,7 @@ static int flexcan_probe(struct platform_device *pdev)
failed_register:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ failed_platform_get_irq:
free_candev(dev);
return err;
}
@@ -2322,6 +2408,7 @@ static struct platform_driver flexcan_driver = {
},
.probe = flexcan_probe,
.remove = flexcan_remove,
+ .id_table = flexcan_id_table,
};
module_platform_driver(flexcan_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 2a6c918186c0..c68ad56628bd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1815,9 +1815,9 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
* Sysfs Attributes
*/
-static ssize_t ican3_sysfs_show_term(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t termination_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
int ret;
@@ -1834,9 +1834,9 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
}
-static ssize_t ican3_sysfs_set_term(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t termination_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
unsigned long enable;
@@ -1852,18 +1852,17 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
-static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t fwinfo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
}
-static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term,
- ican3_sysfs_set_term);
-static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL);
+static DEVICE_ATTR_RW(termination);
+static DEVICE_ATTR_RO(fwinfo);
static struct attribute *ican3_sysfs_attrs[] = {
&dev_attr_termination.attr,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index bba2a449ac70..0cffaad905c2 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -1058,6 +1059,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
}
}
+ if (cdev->is_peripheral)
+ can_rx_offload_threaded_irq_finish(&cdev->offload);
+
return IRQ_HANDLED;
}
@@ -1436,32 +1440,20 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
@@ -1518,6 +1510,8 @@ static int m_can_close(struct net_device *dev)
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
+ phy_power_off(cdev->transceiver);
+
return 0;
}
@@ -1703,10 +1697,14 @@ static int m_can_open(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
- err = m_can_clk_start(cdev);
+ err = phy_power_on(cdev->transceiver);
if (err)
return err;
+ err = m_can_clk_start(cdev);
+ if (err)
+ goto out_phy_power_off;
+
/* open the can device */
err = open_candev(dev);
if (err) {
@@ -1763,6 +1761,8 @@ out_wq_fail:
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
+out_phy_power_off:
+ phy_power_off(cdev->transceiver);
return err;
}
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ace071c3e58c..56e994376a7b 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -82,9 +83,7 @@ struct m_can_classdev {
struct workqueue_struct *tx_wq;
struct work_struct tx_work;
struct sk_buff *tx_skb;
-
- struct can_bittiming_const *bit_timing;
- struct can_bittiming_const *data_timing;
+ struct phy *transceiver;
struct m_can_ops *ops;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 599de0e08cd7..a28c84aa8fa8 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -6,6 +6,7 @@
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -67,6 +68,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
void __iomem *mram_addr;
+ struct phy *transceiver;
int irq, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev,
@@ -80,8 +82,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
if (ret)
goto probe_fail;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource_byname(pdev, "m_can");
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
@@ -101,6 +102,16 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
}
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver)) {
+ ret = PTR_ERR(transceiver);
+ dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
+ goto probe_fail;
+ }
+
+ if (transceiver)
+ mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate;
+
priv->base = addr;
priv->mram_base = mram_addr;
@@ -108,6 +119,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->pm_clock_support = 1;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
+ mcan_class->transceiver = transceiver;
mcan_class->ops = &m_can_plat_ops;
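For context on the transceiver handling above: devm_phy_optional_get() returns NULL rather than an error when no PHY is described, and the generic PHY calls added to m_can_open()/m_can_close() treat a NULL phy as a successful no-op, so no extra checks are needed outside the bitrate_max assignment. A minimal consumer sketch, with illustrative variable names:

	struct phy *transceiver;
	int ret;

	transceiver = devm_phy_optional_get(dev, NULL);	/* NULL when absent */
	if (IS_ERR(transceiver))
		return dev_err_probe(dev, PTR_ERR(transceiver),
				     "failed to get phy\n");

	ret = phy_power_on(transceiver);	/* returns 0 for a NULL phy */
	if (ret)
		return ret;

	/* ... use the device ... */

	phy_power_off(transceiver);		/* likewise a no-op for NULL */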
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 84eac8cb8686..6db90dc4bc9d 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -28,6 +28,10 @@ MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
+/* FPGA cards FW version registers */
+#define PEAK_VER_REG1 0x40
+#define PEAK_VER_REG2 0x44
+
struct peak_pciec_card;
struct peak_pci_chan {
void __iomem *cfg_base; /* Common for all channels */
@@ -41,9 +45,7 @@ struct peak_pci_chan {
#define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define PEAK_PCI_OCR OCR_TX0_PUSHPULL
-/*
- * Important PITA registers
- */
+/* Important PITA registers */
#define PITA_ICR 0x00 /* Interrupt control register */
#define PITA_GPIOICR 0x18 /* GPIO interface control register */
#define PITA_MISC 0x1C /* Miscellaneous register */
@@ -70,27 +72,47 @@ static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = {
};
static const struct pci_device_id peak_pci_tbl[] = {
- {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {
+ PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI Express",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-miniPCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-miniPCIe",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PC/104-Plus Quad",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI/104-Express",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-cPCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-Chip PCIe",
+ },
#ifdef CONFIG_CAN_PEAK_PCIEC
- {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-ExpressCard",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-ExpressCard 34",
+ },
#endif
- {0,}
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
#ifdef CONFIG_CAN_PEAK_PCIEC
-/*
- * PCAN-ExpressCard needs I2C bit-banging configuration option.
- */
+/* PCAN-ExpressCard needs I2C bit-banging configuration option. */
/* GPIOICR byte access offsets */
#define PITA_GPOUT 0x18 /* GPx output value */
@@ -156,12 +178,14 @@ static void peak_pci_write_reg(const struct sja1000_priv *priv,
static inline void pita_set_scl_highz(struct peak_pciec_card *card)
{
u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL;
+
writeb(gp_outen, card->cfg_base + PITA_GPOEN);
}
static inline void pita_set_sda_highz(struct peak_pciec_card *card)
{
u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA;
+
writeb(gp_outen, card->cfg_base + PITA_GPOEN);
}
@@ -230,9 +254,7 @@ static int pita_getscl(void *data)
return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0;
}
-/*
- * write commands to the LED chip though the I2C-bus of the PCAN-PCIeC
- */
+/* write commands to the LED chip through the I2C-bus of the PCAN-PCIeC */
static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
u8 offset, u8 data)
{
@@ -248,7 +270,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
int ret;
/* cache led mask */
- if ((offset == 5) && (data == card->led_cache))
+ if (offset == 5 && data == card->led_cache)
return 0;
ret = i2c_transfer(&card->led_chip, &msg, 1);
@@ -261,9 +283,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
return 0;
}
-/*
- * delayed work callback used to control the LEDs
- */
+/* delayed work callback used to control the LEDs */
static void peak_pciec_led_work(struct work_struct *work)
{
struct peak_pciec_card *card =
@@ -309,9 +329,7 @@ static void peak_pciec_led_work(struct work_struct *work)
schedule_delayed_work(&card->led_work, HZ);
}
-/*
- * set LEDs blinking state
- */
+/* set LEDs blinking state */
static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
{
u8 new_led = card->led_cache;
@@ -328,25 +346,19 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
peak_pciec_write_pca9553(card, 5, new_led);
}
-/*
- * start one second delayed work to control LEDs
- */
+/* start one second delayed work to control LEDs */
static void peak_pciec_start_led_work(struct peak_pciec_card *card)
{
schedule_delayed_work(&card->led_work, HZ);
}
-/*
- * stop LEDs delayed work
- */
+/* stop LEDs delayed work */
static void peak_pciec_stop_led_work(struct peak_pciec_card *card)
{
cancel_delayed_work_sync(&card->led_work);
}
-/*
- * initialize the PCA9553 4-bit I2C-bus LED chip
- */
+/* initialize the PCA9553 4-bit I2C-bus LED chip */
static int peak_pciec_init_leds(struct peak_pciec_card *card)
{
int err;
@@ -375,17 +387,14 @@ static int peak_pciec_init_leds(struct peak_pciec_card *card)
return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT);
}
-/*
- * restore LEDs state to off peak_pciec_leds_exit
- */
+/* restore LEDs state to off peak_pciec_leds_exit */
static void peak_pciec_leds_exit(struct peak_pciec_card *card)
{
/* switch LEDs to off */
peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL);
}
-/*
- * normal write sja1000 register method overloaded to catch when controller
+/* normal write sja1000 register method overloaded to catch when controller
* is started or stopped, to control leds
*/
static void peak_pciec_write_reg(const struct sja1000_priv *priv,
@@ -443,7 +452,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
/* channel is the first one: do the init part */
} else {
/* create the bit banging I2C adapter structure */
- card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL);
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -506,9 +515,7 @@ static void peak_pciec_remove(struct peak_pciec_card *card)
#else /* CONFIG_CAN_PEAK_PCIEC */
-/*
- * Placebo functions when PCAN-ExpressCard support is not selected
- */
+/* Placebo functions when PCAN-ExpressCard support is not selected */
static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
{
return -ENODEV;
@@ -549,6 +556,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
+ char fw_str[14] = "";
err = pci_enable_device(pdev);
if (err)
@@ -602,6 +610,21 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Leave parport mux mode */
writeb(0x04, cfg_base + PITA_MISC + 3);
+ /* FPGA-equipped card if this register reads non-zero */
+ if (readl(cfg_base + PEAK_VER_REG1)) {
+ /* FPGA card: display version of the running firmware */
+ u32 fw_ver = readl(cfg_base + PEAK_VER_REG2);
+
+ snprintf(fw_str, sizeof(fw_str), " FW v%u.%u.%u",
+ (fw_ver >> 12) & 0xf,
+ (fw_ver >> 8) & 0xf,
+ (fw_ver >> 4) & 0xf);
+ }
+
+ /* Display commercial name (and, if available, FW version) of the card */
+ dev_info(&pdev->dev, "%ux CAN %s%s\n",
+ channels, (const char *)ent->driver_data, fw_str);
+
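A quick worked example of the firmware-version decoding above (the register value is invented):

/* Hypothetical readout: fw_ver = 0x00003210 from PEAK_VER_REG2 prints
 * " FW v3.2.1":
 *   (fw_ver >> 12) & 0xf == 3   -> major
 *   (fw_ver >>  8) & 0xf == 2   -> minor
 *   (fw_ver >>  4) & 0xf == 1   -> sub-minor
 * The worst case, " FW v15.15.15", is 13 characters plus the NUL
 * terminator and exactly fills the 14-byte fw_str buffer declared in
 * peak_pci_probe().
 */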
icr = readw(cfg_base + PITA_ICR + 2);
for (i = 0; i < channels; i++) {
@@ -642,8 +665,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
chan->prev_dev = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, dev);
- /*
- * PCAN-ExpressCard needs some additional i2c init.
+ /* PCAN-ExpressCard needs some additional i2c init.
* This must be done *before* register_sja1000dev() but
* *after* devices linkage
*/
@@ -709,7 +731,8 @@ failure_disable_pci:
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
- * (err is unchanged if negative) */
+ * (err is unchanged if negative)
+ */
return pcibios_err_to_errno(err);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 9ae48072b6c6..6c369a399c45 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -15,10 +15,10 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/unaligned.h>
@@ -2195,8 +2195,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
priv->regs_status.intf);
- if (!(intf_pending))
+ if (!(intf_pending)) {
+ can_rx_offload_threaded_irq_finish(&priv->offload);
return handled;
+ }
/* Some interrupts must be ACKed in the
* MCP251XFD_REG_INT register.
@@ -2296,6 +2298,8 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
} while (1);
out_fail:
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
err, priv->regs_status.intf);
mcp251xfd_dump(priv);
@@ -2524,8 +2528,8 @@ static int mcp251xfd_open(struct net_device *ndev)
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
- IRQF_ONESHOT, dev_name(&spi->dev),
- priv);
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&spi->dev), priv);
if (err)
goto out_can_rx_offload_disable;
@@ -2857,7 +2861,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
- u32 freq;
+ u32 freq = 0;
int err;
if (!spi->irq)
@@ -2884,11 +2888,19 @@ static int mcp251xfd_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
"Failed to get Transceiver regulator!\n");
- clk = devm_clk_get(&spi->dev, NULL);
+ clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&spi->dev, PTR_ERR(clk),
"Failed to get Oscillator (clock)!\n");
- freq = clk_get_rate(clk);
+ if (clk) {
+ freq = clk_get_rate(clk);
+ } else {
+ err = device_property_read_u32(&spi->dev, "clock-frequency",
+ &freq);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "Failed to get clock-frequency!\n");
+ }
/* Sanity check */
if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 73245d8836a9..353062ead98f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -786,6 +786,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
int_status = hecc_read(priv, HECC_CANGIF0);
}
+ can_rx_offload_irq_finish(&priv->offload);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 66fa8b07c2e6..7370981e9b34 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -476,7 +476,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
netif_trans_update(netdev);
}
-static ssize_t show_firmware(struct device *d,
+static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -487,9 +487,9 @@ static ssize_t show_firmware(struct device *d,
(dev->version >> 8) & 0xf,
dev->version & 0xff);
}
-static DEVICE_ATTR(firmware, 0444, show_firmware, NULL);
+static DEVICE_ATTR_RO(firmware);
-static ssize_t show_hardware(struct device *d,
+static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -500,9 +500,9 @@ static ssize_t show_hardware(struct device *d,
(dev->version >> 24) & 0xf,
(dev->version >> 16) & 0xff);
}
-static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
+static DEVICE_ATTR_RO(hardware);
-static ssize_t show_nets(struct device *d,
+static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -510,7 +510,7 @@ static ssize_t show_nets(struct device *d,
return sprintf(buf, "%d", dev->net_count);
}
-static DEVICE_ATTR(nets, 0444, show_nets, NULL);
+static DEVICE_ATTR_RO(nets);
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
{
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 1985f772fc3c..14e360c9f2c9 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -355,7 +355,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv,
return -EMSGSIZE;
if (priv->tx_can_msg_cnt == 0) {
- msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */
+ msg_len = sizeof(es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg);
es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE,
ES581_4_CMD_ID_TX_MSG,
priv->channel_idx, msg_len);
@@ -371,8 +371,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv,
return ret;
/* Fill message contents. */
- tx_can_msg = (struct es581_4_tx_can_msg *)
- &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1];
+ tx_can_msg = (typeof(tx_can_msg))&es581_4_urb_cmd->raw_msg[msg_len];
put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx);
put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 8e9102482c52..96a13c770e4a 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -19,7 +19,7 @@
#include "es58x_core.h"
#define DRV_VERSION "1.00"
-MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>");
+MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
MODULE_VERSION(DRV_VERSION);
@@ -70,7 +70,7 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
* bytes (the start of frame) are skipped and the CRC calculation
* starts on the third byte.
*/
-#define ES58X_CRC_CALC_OFFSET 2
+#define ES58X_CRC_CALC_OFFSET sizeof_field(union es58x_urb_cmd, sof)
/**
* es58x_calculate_crc() - Compute the crc16 of a given URB.
@@ -2108,6 +2108,25 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
}
/**
+ * es58x_free_netdevs() - Release all network resources of the device.
+ * @es58x_dev: ES58X device.
+ */
+static void es58x_free_netdevs(struct es58x_device *es58x_dev)
+{
+ int i;
+
+ for (i = 0; i < es58x_dev->num_can_ch; i++) {
+ struct net_device *netdev = es58x_dev->netdev[i];
+
+ if (!netdev)
+ continue;
+ unregister_candev(netdev);
+ es58x_dev->netdev[i] = NULL;
+ free_candev(netdev);
+ }
+}
+
+/**
* es58x_get_product_info() - Get the product information and print them.
* @es58x_dev: ES58X device.
*
@@ -2152,14 +2171,13 @@ static int es58x_get_product_info(struct es58x_device *es58x_dev)
/**
* es58x_init_es58x_dev() - Initialize the ES58X device.
* @intf: USB interface.
- * @p_es58x_dev: pointer to the address of the ES58X device.
* @driver_info: Quirks of the device.
*
- * Return: zero on success, errno when any error occurs.
+ * Return: pointer to an ES58X device on success, error pointer when
+ * any error occurs.
*/
-static int es58x_init_es58x_dev(struct usb_interface *intf,
- struct es58x_device **p_es58x_dev,
- kernel_ulong_t driver_info)
+static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
+ kernel_ulong_t driver_info)
{
struct device *dev = &intf->dev;
struct es58x_device *es58x_dev;
@@ -2176,7 +2194,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
if (ret)
- return ret;
+ return ERR_PTR(ret);
if (driver_info & ES58X_FD_FAMILY) {
param = &es58x_fd_param;
@@ -2186,9 +2204,10 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ops = &es581_4_ops;
}
- es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL);
+ es58x_dev = devm_kzalloc(dev, es58x_sizeof_es58x_device(param),
+ GFP_KERNEL);
if (!es58x_dev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
es58x_dev->param = param;
es58x_dev->ops = ops;
@@ -2213,9 +2232,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ep_out->bEndpointAddress);
es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
- *p_es58x_dev = es58x_dev;
-
- return 0;
+ return es58x_dev;
}
/**
@@ -2232,30 +2249,21 @@ static int es58x_probe(struct usb_interface *intf,
struct es58x_device *es58x_dev;
int ch_idx, ret;
- ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info);
- if (ret)
- return ret;
+ es58x_dev = es58x_init_es58x_dev(intf, id->driver_info);
+ if (IS_ERR(es58x_dev))
+ return PTR_ERR(es58x_dev);
ret = es58x_get_product_info(es58x_dev);
if (ret)
- goto cleanup_es58x_dev;
+ return ret;
for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
ret = es58x_init_netdev(es58x_dev, ch_idx);
- if (ret)
- goto cleanup_candev;
- }
-
- return ret;
-
- cleanup_candev:
- for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++)
- if (es58x_dev->netdev[ch_idx]) {
- unregister_candev(es58x_dev->netdev[ch_idx]);
- free_candev(es58x_dev->netdev[ch_idx]);
+ if (ret) {
+ es58x_free_netdevs(es58x_dev);
+ return ret;
}
- cleanup_es58x_dev:
- kfree(es58x_dev);
+ }
return ret;
}
@@ -2270,24 +2278,12 @@ static int es58x_probe(struct usb_interface *intf,
static void es58x_disconnect(struct usb_interface *intf)
{
struct es58x_device *es58x_dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- int i;
dev_info(&intf->dev, "Disconnecting %s %s\n",
es58x_dev->udev->manufacturer, es58x_dev->udev->product);
- for (i = 0; i < es58x_dev->num_can_ch; i++) {
- netdev = es58x_dev->netdev[i];
- if (!netdev)
- continue;
- unregister_candev(netdev);
- es58x_dev->netdev[i] = NULL;
- free_candev(netdev);
- }
-
+ es58x_free_netdevs(es58x_dev);
es58x_free_urbs(es58x_dev);
-
- kfree(es58x_dev);
usb_set_intfdata(intf, NULL);
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index fcf219e727bf..826a15871573 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -287,7 +287,7 @@ struct es58x_priv {
* @rx_urb_cmd_max_len: Maximum length of a RX URB command.
* @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head
* field of the struct es58x_priv into echo_skb
- * indexes. Properties: @fifo_mask = echos_skb_max - 1 where
+ * indexes. Properties: @fifo_mask = echo_skb_max - 1 where
* echo_skb_max must be a power of two. Also, echo_skb_max must
* not exceed the maximum size of the device internal TX FIFO
* length. This parameter is used to control the network queue
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 1a2779d383a4..af042aa55f59 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -357,8 +357,7 @@ static int es58x_fd_tx_can_msg(struct es58x_priv *priv,
return ret;
/* Fill message contents. */
- tx_can_msg = (struct es58x_fd_tx_can_msg *)
- &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len];
+ tx_can_msg = (typeof(tx_can_msg))&es58x_fd_urb_cmd->raw_msg[msg_len];
tx_can_msg->packet_idx = (u8)priv->tx_head;
put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
tx_can_msg->flags = (u8)es58x_get_flags(skb);
@@ -463,9 +462,9 @@ static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev)
}
/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in
- * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family"
- * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register"
- * from Microchip.
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section
+ * 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" from
+ * Microchip.
*
* The values from the specification are the hardware register
* values. To convert them to the functional values, all ranges were
@@ -484,8 +483,8 @@ static const struct can_bittiming_const es58x_fd_nom_bittiming_const = {
};
/* Data bittiming constants for ES582.1 and ES584.1 as specified in
- * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family"
- * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section
+ * 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from
* Microchip.
*/
static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
@@ -501,9 +500,9 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
};
/* Transmission Delay Compensation constants for ES582.1 and ES584.1
- * as specified in the microcontroller datasheet: "SAM
- * E701/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay
- * Compensation Register" from Microchip.
+ * as specified in the microcontroller datasheet: "SAM E70/S70/V70/V71
+ * Family" section 49.6.15 "MCAN Transmitter Delay Compensation
+ * Register" from Microchip.
*/
static const struct can_tdc_const es58x_tdc_const = {
.tdcv_max = 0, /* Manual mode not supported. */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 899a3d21b77f..837b3fecd71e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -63,6 +63,8 @@
#define PCAN_USB_MSG_HEADER_LEN 2
+#define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */
+
/* PCAN-USB adapter internal clock (MHz) */
#define PCAN_USB_CRYSTAL_HZ 16000000
@@ -73,6 +75,10 @@
#define PCAN_USB_STATUSLEN_RTR (1 << 4)
#define PCAN_USB_STATUSLEN_DLC (0xf)
+/* PCAN-USB 4.1 CAN Id tx extended flags */
+#define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */
+#define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */
+
/* PCAN-USB error flags */
#define PCAN_USB_ERROR_TXFULL 0x01
#define PCAN_USB_ERROR_RXQOVR 0x02
@@ -385,7 +391,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
- *device_id = args[0];
+ else
+ *device_id = args[0];
return err;
}
@@ -446,145 +453,65 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
{
struct sk_buff *skb;
struct can_frame *cf;
- enum can_state new_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
/* ignore this error until 1st ts received */
if (n == PCAN_USB_ERROR_QOVR)
if (!mc->pdev->time_ref.tick_count)
return 0;
- new_state = mc->pdev->dev.can.state;
-
- switch (mc->pdev->dev.can.state) {
- case CAN_STATE_ERROR_ACTIVE:
- if (n & PCAN_USB_ERROR_BUS_LIGHT) {
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- fallthrough;
-
- case CAN_STATE_ERROR_WARNING:
- if (n & PCAN_USB_ERROR_BUS_HEAVY) {
- new_state = CAN_STATE_ERROR_PASSIVE;
- break;
- }
- if (n & PCAN_USB_ERROR_BUS_OFF) {
- new_state = CAN_STATE_BUS_OFF;
- break;
- }
- if (n & ~PCAN_USB_ERROR_BUS) {
- /*
- * trick to bypass next comparison and process other
- * errors
- */
- new_state = CAN_STATE_MAX;
- break;
- }
- if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
- /* no error (back to active state) */
- new_state = CAN_STATE_ERROR_ACTIVE;
- break;
- }
- break;
-
- case CAN_STATE_ERROR_PASSIVE:
- if (n & PCAN_USB_ERROR_BUS_OFF) {
- new_state = CAN_STATE_BUS_OFF;
- break;
- }
- if (n & PCAN_USB_ERROR_BUS_LIGHT) {
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- if (n & ~PCAN_USB_ERROR_BUS) {
- /*
- * trick to bypass next comparison and process other
- * errors
- */
- new_state = CAN_STATE_MAX;
- break;
- }
-
- if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
- /* no error (back to warning state) */
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- break;
-
- default:
- /* do nothing waiting for restart */
- return 0;
- }
-
- /* donot post any error if current state didn't change */
- if (mc->pdev->dev.can.state == new_state)
- return 0;
-
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(mc->netdev, &cf);
- if (!skb)
- return -ENOMEM;
-
- switch (new_state) {
- case CAN_STATE_BUS_OFF:
- cf->can_id |= CAN_ERR_BUSOFF;
- mc->pdev->dev.can.can_stats.bus_off++;
- can_bus_off(mc->netdev);
- break;
-
- case CAN_STATE_ERROR_PASSIVE:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
-
- mc->pdev->dev.can.can_stats.error_passive++;
- break;
-
- case CAN_STATE_ERROR_WARNING:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
-
- mc->pdev->dev.can.can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
-
- /* sync local copies of rxerr/txerr counters */
- mc->pdev->bec.txerr = 0;
- mc->pdev->bec.rxerr = 0;
- break;
-
- default:
- /* CAN_STATE_MAX (trick to handle other errors) */
- if (n & PCAN_USB_ERROR_TXQFULL)
- netdev_dbg(mc->netdev, "device Tx queue full)\n");
-
- if (n & PCAN_USB_ERROR_RXQOVR) {
- netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ if (n & PCAN_USB_ERROR_RXQOVR) {
+ /* data overrun interrupt */
+ netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+ if (cf) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- mc->netdev->stats.rx_over_errors++;
- mc->netdev->stats.rx_errors++;
}
+ }
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full\n");
- new_state = mc->pdev->dev.can.state;
- break;
+ if (n & PCAN_USB_ERROR_BUS_OFF) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (n & PCAN_USB_ERROR_BUS_HEAVY) {
+ new_state = ((mc->pdev->bec.txerr >= 128) ||
+ (mc->pdev->bec.rxerr >= 128)) ?
+ CAN_STATE_ERROR_PASSIVE :
+ CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
}
- mc->pdev->dev.can.state = new_state;
+ /* handle change of state */
+ if (new_state != mc->pdev->dev.can.state) {
+ enum can_state tx_state =
+ (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ?
+ new_state : 0;
+ enum can_state rx_state =
+ (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ?
+ new_state : 0;
+
+ can_change_state(mc->netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(mc->netdev);
+ } else if (cf && (cf->can_id & CAN_ERR_CRTL)) {
+ /* Supply TX/RX error counters in case of
+ * controller error.
+ */
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+ }
+ }
+
+ if (!skb)
+ return -ENOMEM;
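A worked example of the simplified state tracking above, with invented error counters:

/* If the adapter reports PCAN_USB_ERROR_BUS_HEAVY while
 * mc->pdev->bec.txerr == 136 and mc->pdev->bec.rxerr == 2, new_state
 * becomes CAN_STATE_ERROR_PASSIVE (txerr >= 128).  Because
 * txerr >= rxerr, can_change_state() is called with
 * tx_state == CAN_STATE_ERROR_PASSIVE and rx_state == 0, the error
 * frame gets CAN_ERR_CRTL | CAN_ERR_CRTL_TX_PASSIVE, and data[6]/data[7]
 * are then filled with 136 and 2 by the block above.
 */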
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
@@ -706,6 +633,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
struct sk_buff *skb;
struct can_frame *cf;
struct skb_shared_hwtstamps *hwts;
+ u32 can_id_flags;
skb = alloc_can_skb(mc->netdev, &cf);
if (!skb)
@@ -715,13 +643,15 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
if ((mc->ptr + 4) > mc->end)
goto decode_failed;
- cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG;
+ can_id_flags = get_unaligned_le32(mc->ptr);
+ cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG;
mc->ptr += 4;
} else {
if ((mc->ptr + 2) > mc->end)
goto decode_failed;
- cf->can_id = get_unaligned_le16(mc->ptr) >> 5;
+ can_id_flags = get_unaligned_le16(mc->ptr);
+ cf->can_id = can_id_flags >> 5;
mc->ptr += 2;
}
@@ -744,6 +674,10 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
memcpy(cf->data, mc->ptr, cf->len);
mc->ptr += rec_len;
+
+ /* Ignore next byte (client private id) if SRR bit is set */
+ if (can_id_flags & PCAN_USB_TX_SRR)
+ mc->ptr++;
}
/* convert timestamp into kernel time */
@@ -821,10 +755,11 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
struct net_device *netdev = dev->netdev;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 can_id_flags = cf->can_id & CAN_ERR_MASK;
u8 *pc;
- obuf[0] = 2;
- obuf[1] = 1;
+ obuf[0] = PCAN_USB_MSG_TX_CAN;
+ obuf[1] = 1; /* only one CAN frame is stored in the packet */
pc = obuf + PCAN_USB_MSG_HEADER_LEN;
@@ -839,12 +774,28 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
*pc |= PCAN_USB_STATUSLEN_EXT_ID;
pc++;
- put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc);
+ can_id_flags <<= 3;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ can_id_flags |= PCAN_USB_TX_SRR;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ can_id_flags |= PCAN_USB_TX_AT;
+
+ put_unaligned_le32(can_id_flags, pc);
pc += 4;
} else {
pc++;
- put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc);
+ can_id_flags <<= 5;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ can_id_flags |= PCAN_USB_TX_SRR;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ can_id_flags |= PCAN_USB_TX_AT;
+
+ put_unaligned_le16(can_id_flags, pc);
pc += 2;
}
@@ -854,6 +805,10 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
pc += cf->len;
}
+ /* SRR bit needs a writer id (useless here) */
+ if (can_id_flags & PCAN_USB_TX_SRR)
+ *pc++ = 0x80;
+
obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff);
return 0;
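To make the ID/flag packing above concrete, here is a worked example with an arbitrarily chosen standard-frame ID:

/* A standard frame with can_id == 0x123, sent while
 * CAN_CTRLMODE_LOOPBACK is enabled, is encoded as
 * can_id_flags = (0x123 << 5) | PCAN_USB_TX_SRR == 0x2461 and stored
 * little-endian.  Because PCAN_USB_TX_SRR is set, one extra "writer id"
 * byte (0x80) is appended after the data; the receive path in
 * pcan_usb_decode_data() skips that byte again when it sees the SRR bit.
 */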
@@ -928,6 +883,19 @@ static int pcan_usb_init(struct peak_usb_device *dev)
return err;
}
+ /* Since rev 4.1, PCAN-USB is able to transmit single-shot as well as
+ * looped-back frames.
+ */
+ if (dev->device_rev >= 41) {
+ struct can_priv *priv = netdev_priv(dev->netdev);
+
+ priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_LOOPBACK;
+ } else {
+ dev_info(dev->netdev->dev.parent,
+ "Firmware update available. Please contact support@peak-system.com\n");
+ }
+
dev_info(dev->netdev->dev.parent,
"PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
pcan_usb.name, dev->device_rev, serial_number,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 272b0535d946..ddb51dd132ef 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1221,14 +1221,36 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
bool found = false;
u16 pvlan;
- list_for_each_entry(dp, &dst->ports, list) {
- if (dp->ds->index == dev && dp->index == port) {
+ /* dev is a physical switch */
+ if (dev <= dst->last_switch) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ /* dp might be a DSA link or a user port, so it
+ * might or might not have a bridge_dev
+ * pointer. Use the "found" variable for both
+ * cases.
+ */
+ br = dp->bridge_dev;
+ found = true;
+ break;
+ }
+ }
+ /* dev is a virtual bridge */
+ } else {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_num < 0)
+ continue;
+
+ if (dp->bridge_num + 1 + dst->last_switch != dev)
+ continue;
+
+ br = dp->bridge_dev;
found = true;
break;
}
}
- /* Prevent frames from unknown switch or port */
+ /* Prevent frames from unknown switch or virtual bridge */
if (!found)
return 0;
@@ -1236,7 +1258,6 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
- br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
@@ -2422,6 +2443,44 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
mv88e6xxx_reg_unlock(chip);
}
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map in the PVT. First dst->last_switch elements are taken by
+ * physical switches, so start from beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ int bridge_num)
+{
+ u8 dev = bridge_num + ds->dst->last_switch + 1;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_pvt_map(chip, dev, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num)
+{
+ return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
+}
+
+static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num)
+{
+ int err;
+
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
+ if (err) {
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
+ ERR_PTR(err));
+ }
+}
+
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
@@ -3025,6 +3084,15 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ /* Since virtual bridges are mapped in the PVT, the number we support
+ * depends on the physical switch topology. We need to let DSA figure
+ * that out and therefore we cannot set this at dsa_register_switch()
+ * time.
+ */
+ if (mv88e6xxx_has_pvt(chip))
+ ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
+
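A worked example of the PVT numbering above, with invented topology figures (and assuming MV88E6XXX_MAX_PVT_SWITCHES is 32):

/* With three physical switches in the tree, ds->dst->last_switch == 2,
 * so up to 32 - 2 - 1 == 29 bridges can be offloaded.  The first
 * software bridge (bridge_num == 0) is mapped to PVT "device"
 * 0 + 2 + 1 == 3, i.e. the first slot after the physical switches,
 * the second bridge to device 4, and so on.
 */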
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) {
@@ -6128,6 +6196,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
+ .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
+ .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index a2a15919b960..583a22d901b3 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -231,11 +231,6 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
return 0;
}
-static const struct dsa_8021q_ops felix_tag_8021q_ops = {
- .vlan_add = felix_tag_8021q_vlan_add,
- .vlan_del = felix_tag_8021q_vlan_del,
-};
-
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -425,29 +420,18 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
- felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
- GFP_KERNEL);
- if (!felix->dsa_8021q_ctx)
- return -ENOMEM;
-
- felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops;
- felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD);
- felix->dsa_8021q_ctx->ds = ds;
-
- err = dsa_8021q_setup(felix->dsa_8021q_ctx, true);
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
- goto out_free_dsa_8021_ctx;
+ return err;
err = felix_setup_mmio_filtering(felix);
if (err)
- goto out_teardown_dsa_8021q;
+ goto out_tag_8021q_unregister;
return 0;
-out_teardown_dsa_8021q:
- dsa_8021q_setup(felix->dsa_8021q_ctx, false);
-out_free_dsa_8021_ctx:
- kfree(felix->dsa_8021q_ctx);
+out_tag_8021q_unregister:
+ dsa_tag_8021q_unregister(ds);
return err;
}
@@ -462,11 +446,7 @@ static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
- err = dsa_8021q_setup(felix->dsa_8021q_ctx, false);
- if (err)
- dev_err(ds->dev, "dsa_8021q_setup returned %d", err);
-
- kfree(felix->dsa_8021q_ctx);
+ dsa_tag_8021q_unregister(ds);
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -1679,6 +1659,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del = felix_mrp_del,
.port_mrp_add_ring_role = felix_mrp_add_ring_role,
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
+ .tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 4d96cad815d5..9da3c6a94c6e 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -60,7 +60,6 @@ struct felix {
struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
- struct dsa_8021q_context *dsa_8021q_ctx;
enum dsa_tag_protocol tag_proto;
};
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 221c7abdef0e..9cd7dbdd7db9 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -226,27 +226,13 @@ struct sja1105_flow_block {
int num_virtual_links;
};
-struct sja1105_bridge_vlan {
- struct list_head list;
- int port;
- u16 vid;
- bool pvid;
- bool untagged;
-};
-
-enum sja1105_vlan_state {
- SJA1105_VLAN_UNAWARE,
- SJA1105_VLAN_BEST_EFFORT,
- SJA1105_VLAN_FILTERING_FULL,
-};
-
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool best_effort_vlan_filtering;
+ bool vlan_aware;
unsigned long learn_ena;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
@@ -255,16 +241,14 @@ struct sja1105_private {
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
- struct list_head dsa_8021q_vlans;
- struct list_head bridge_vlans;
+ u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
+ u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
- struct dsa_8021q_context *dsa_8021q_ctx;
- enum sja1105_vlan_state vlan_state;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1;
@@ -311,10 +295,6 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
void sja1105_devlink_teardown(struct dsa_switch *ds);
-int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx);
-int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx);
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index b6a4a16b8c7e..05c7f4ca3b1a 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -115,105 +115,6 @@ static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
kfree(priv->regions);
}
-static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
- bool *be_vlan)
-{
- *be_vlan = priv->best_effort_vlan_filtering;
-
- return 0;
-}
-
-static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
- bool be_vlan)
-{
- struct dsa_switch *ds = priv->ds;
- bool vlan_filtering;
- int port;
- int rc;
-
- priv->best_effort_vlan_filtering = be_vlan;
-
- rtnl_lock();
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp;
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- dp = dsa_to_port(ds, port);
- vlan_filtering = dsa_port_is_vlan_filtering(dp);
-
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, NULL);
- if (rc)
- break;
- }
- rtnl_unlock();
-
- return rc;
-}
-
-enum sja1105_devlink_param_id {
- SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
-};
-
-int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct sja1105_private *priv = ds->priv;
- int err;
-
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_get(priv,
- &ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct sja1105_private *priv = ds->priv;
- int err;
-
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_set(priv,
- ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static const struct devlink_param sja1105_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
- "best_effort_vlan_filtering",
- DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
-};
-
-static int sja1105_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
-static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -233,23 +134,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
int sja1105_devlink_setup(struct dsa_switch *ds)
{
- int rc;
-
- rc = sja1105_setup_devlink_params(ds);
- if (rc)
- return rc;
-
- rc = sja1105_setup_devlink_regions(ds);
- if (rc < 0) {
- sja1105_teardown_devlink_params(ds);
- return rc;
- }
-
- return 0;
+ return sja1105_setup_devlink_regions(ds);
}
void sja1105_devlink_teardown(struct dsa_switch *ds)
{
- sja1105_teardown_devlink_params(ds);
sja1105_teardown_devlink_regions(ds);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 56fead68ea9f..bd3ad18c150e 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1354,14 +1354,14 @@ u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params =
priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
- u64 poly_koopman = l2_lookup_params->poly;
+ u64 input, poly_koopman = l2_lookup_params->poly;
/* Convert polynomial from Koopman to 'normal' notation */
u8 poly = (u8)(1 + (poly_koopman << 1));
- u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid;
- u64 input = (vlanid << 48) | ether_addr_to_u64(addr);
u8 crc = 0; /* seed */
int i;
+ input = ((u64)vid << 48) | ether_addr_to_u64(addr);
+
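A worked example of the hash input and the Koopman-to-normal conversion above (all values invented):

/* With poly_koopman == 0x97 the conversion yields
 * poly = (u8)(1 + (0x97 << 1)) == 0x2f.  For vid == 1 and
 * addr == 00:11:22:33:44:55 the word fed to the CRC loop is
 * input = (1ULL << 48) | 0x001122334455 == 0x0001001122334455,
 * which the loop below masks one byte at a time starting at the MSB.
 */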
/* Mask the eight bytes starting from MSB one at a time */
for (i = 56; i >= 0; i -= 8) {
u8 byte = (input & (0xffull << i)) >> i;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index e2dc997580a8..5ab1676a7448 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -57,6 +57,81 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
return !!(l2_fwd[from].reach_port & BIT(to));
}
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].drpuntag == drop)
+ return 0;
+
+ mac[port].drpuntag = drop;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].vlanid == pvid)
+ return 0;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_vlan_lookup_entry *vlan;
+ bool drop_untagged = false;
+ int match, rc;
+ u16 pvid;
+
+ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
+ pvid = priv->bridge_pvid[port];
+ else
+ pvid = priv->tag_8021q_pvid[port];
+
+ rc = sja1105_pvid_apply(priv, port, pvid);
+ if (rc)
+ return rc;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+
+ match = sja1105_is_vlan_configured(priv, pvid);
+
+ if (match < 0 || !(vlan[match].vmemb_port & BIT(port)))
+ drop_untagged = true;
+
+ return sja1105_drop_untagged(ds, port, drop_untagged);
+}
+
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry default_mac = {
@@ -378,8 +453,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_bridge_vlan *v;
-
if (dsa_is_unused_port(ds, port))
continue;
@@ -387,22 +460,10 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- v->port = port;
- v->vid = SJA1105_DEFAULT_VLAN;
- v->untagged = true;
- if (dsa_is_cpu_port(ds, port))
- v->pvid = true;
- list_add(&v->list, &priv->dsa_8021q_vlans);
-
- v = kmemdup(v, sizeof(*v), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- list_add(&v->list, &priv->bridge_vlans);
+ if (dsa_is_cpu_port(ds, port)) {
+ priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN;
+ priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN;
+ }
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
@@ -551,18 +612,11 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
- int max_mem = priv->info->max_frame_mem;
struct sja1105_table *table;
- /* VLAN retagging is implemented using a loopback port that consumes
- * frame buffers. That leaves less for us.
- */
- if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
- max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
-
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
l2_fwd_params = table->entries;
- l2_fwd_params->part_spc[0] = max_mem;
+ l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;
/* If we have any critical-traffic virtual links, we need to reserve
* some frame buffer memory for them. At the moment, hardcode the value
@@ -1422,7 +1476,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
+ if (priv->vlan_aware) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1485,7 +1539,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
+ if (priv->vlan_aware) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1522,18 +1576,6 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
- * so the switch still does some VLAN processing internally.
- * But Shared VLAN Learning (SVL) is also active, and it will take
- * care of autonomous forwarding between the unique pvid's of each
- * port. Here we just make sure that users can't add duplicate FDB
- * entries when in this mode - the actual VID doesn't matter except
- * for what gets printed in 'bridge fdb show'. In the case of zero,
- * no VID gets printed at all.
- */
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
- vid = 0;
-
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
@@ -1542,9 +1584,6 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
- vid = 0;
-
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
@@ -1587,7 +1626,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ if (!priv->vlan_aware)
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1692,6 +1731,10 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
if (rc)
return rc;
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+
return sja1105_manage_flood_domains(priv);
}
@@ -1991,97 +2034,6 @@ out:
return rc;
}
-static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
-{
- struct sja1105_mac_config_entry *mac;
-
- mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
-
- mac[port].vlanid = pvid;
-
- return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
- &mac[port], true);
-}
-
-static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
- int tree_index, int sw_index,
- int other_port, struct net_device *br)
-{
- struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
- struct sja1105_private *other_priv = other_ds->priv;
- struct sja1105_private *priv = ds->priv;
- int port, rc;
-
- if (other_ds->ops != &sja1105_switch_ops)
- return 0;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_user_port(ds, port))
- continue;
- if (dsa_to_port(ds, port)->bridge_dev != br)
- continue;
-
- rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
- port,
- other_priv->dsa_8021q_ctx,
- other_port);
- if (rc)
- return rc;
-
- rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
- other_port,
- priv->dsa_8021q_ctx,
- port);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
- int tree_index, int sw_index,
- int other_port,
- struct net_device *br)
-{
- struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
- struct sja1105_private *other_priv = other_ds->priv;
- struct sja1105_private *priv = ds->priv;
- int port;
-
- if (other_ds->ops != &sja1105_switch_ops)
- return;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_user_port(ds, port))
- continue;
- if (dsa_to_port(ds, port)->bridge_dev != br)
- continue;
-
- dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
- other_priv->dsa_8021q_ctx,
- other_port);
-
- dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
- other_port,
- priv->dsa_8021q_ctx, port);
- }
-}
-
-static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
-{
- struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
- if (rc)
- return rc;
-
- dev_info(ds->dev, "%s switch tagging\n",
- enabled ? "Enabled" : "Disabled");
- return 0;
-}
-
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -2091,669 +2043,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
return priv->info->tag_proto;
}
-static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
-{
- int subvlan;
-
- if (pvid)
- return 0;
-
- for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- if (subvlan_map[subvlan] == VLAN_N_VID)
- return subvlan;
-
- return -1;
-}
-
-static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
-{
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- if (subvlan_map[subvlan] == vid)
- return subvlan;
-
- return -1;
-}
-
-static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
- int port, u16 vid)
-{
- struct sja1105_port *sp = &priv->ports[port];
-
- return sja1105_find_subvlan(sp->subvlan_map, vid);
-}
-
-static void sja1105_init_subvlan_map(u16 *subvlan_map)
-{
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- subvlan_map[subvlan] = VLAN_N_VID;
-}
-
-static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
- u16 *subvlan_map)
-{
- struct sja1105_port *sp = &priv->ports[port];
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- sp->subvlan_map[subvlan] = subvlan_map[subvlan];
-}
-
-static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
-{
- struct sja1105_vlan_lookup_entry *vlan;
- int count, i;
-
- vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
- count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
-
- for (i = 0; i < count; i++)
- if (vlan[i].vlanid == vid)
- return i;
-
- /* Return an invalid entry index if not found */
- return -1;
-}
-
-static int
-sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
- int count, int from_port, u16 from_vid,
- u16 to_vid)
-{
- int i;
-
- for (i = 0; i < count; i++)
- if (retagging[i].ing_port == BIT(from_port) &&
- retagging[i].vlan_ing == from_vid &&
- retagging[i].vlan_egr == to_vid)
- return i;
-
- /* Return an invalid entry index if not found */
- return -1;
-}
-
-static int sja1105_commit_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int num_retagging)
-{
- struct sja1105_retagging_entry *retagging;
- struct sja1105_vlan_lookup_entry *vlan;
- struct sja1105_table *table;
- int num_vlans = 0;
- int rc, i, k = 0;
-
- /* VLAN table */
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- vlan = table->entries;
-
- for (i = 0; i < VLAN_N_VID; i++) {
- int match = sja1105_is_vlan_configured(priv, i);
-
- if (new_vlan[i].vlanid != VLAN_N_VID)
- num_vlans++;
-
- if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
- /* Was there before, no longer is. Delete */
- dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
- rc = sja1105_dynamic_config_write(priv,
- BLK_IDX_VLAN_LOOKUP,
- i, &vlan[match], false);
- if (rc < 0)
- return rc;
- } else if (new_vlan[i].vlanid != VLAN_N_VID) {
- /* Nothing changed, don't do anything */
- if (match >= 0 &&
- vlan[match].vlanid == new_vlan[i].vlanid &&
- vlan[match].tag_port == new_vlan[i].tag_port &&
- vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
- vlan[match].vmemb_port == new_vlan[i].vmemb_port)
- continue;
- /* Update entry */
- dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
- rc = sja1105_dynamic_config_write(priv,
- BLK_IDX_VLAN_LOOKUP,
- i, &new_vlan[i],
- true);
- if (rc < 0)
- return rc;
- }
- }
-
- if (table->entry_count)
- kfree(table->entries);
-
- table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
- GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = num_vlans;
- vlan = table->entries;
-
- for (i = 0; i < VLAN_N_VID; i++) {
- if (new_vlan[i].vlanid == VLAN_N_VID)
- continue;
- vlan[k++] = new_vlan[i];
- }
-
- /* VLAN Retagging Table */
- table = &priv->static_config.tables[BLK_IDX_RETAGGING];
- retagging = table->entries;
-
- for (i = 0; i < table->entry_count; i++) {
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
- i, &retagging[i], false);
- if (rc)
- return rc;
- }
-
- if (table->entry_count)
- kfree(table->entries);
-
- table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
- GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = num_retagging;
- retagging = table->entries;
-
- for (i = 0; i < num_retagging; i++) {
- retagging[i] = new_retagging[i];
-
- /* Update entry */
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
- i, &retagging[i], true);
- if (rc < 0)
- return rc;
- }
-
- return 0;
-}
-
-struct sja1105_crosschip_vlan {
- struct list_head list;
- u16 vid;
- bool untagged;
- int port;
- int other_port;
- struct dsa_8021q_context *other_ctx;
-};
-
-struct sja1105_crosschip_switch {
- struct list_head list;
- struct dsa_8021q_context *other_ctx;
-};
-
-static int sja1105_commit_pvid(struct sja1105_private *priv)
-{
- struct sja1105_bridge_vlan *v;
- struct list_head *vlan_list;
- int rc = 0;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- vlan_list = &priv->bridge_vlans;
- else
- vlan_list = &priv->dsa_8021q_vlans;
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->pvid) {
- rc = sja1105_pvid_apply(priv, v->port, v->vid);
- if (rc)
- break;
- }
- }
-
- return rc;
-}
-
-static int
-sja1105_build_bridge_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan)
-{
- struct sja1105_bridge_vlan *v;
-
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
- return 0;
-
- list_for_each_entry(v, &priv->bridge_vlans, list) {
- int match = v->vid;
-
- new_vlan[match].vlanid = v->vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(v->port);
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- }
-
- return 0;
-}
-
-static int
-sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan)
-{
- struct sja1105_bridge_vlan *v;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- return 0;
-
- list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
- int match = v->vid;
-
- new_vlan[match].vlanid = v->vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(v->port);
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- }
-
- return 0;
-}
-
-static int sja1105_build_subvlans(struct sja1105_private *priv,
- u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int *num_retagging)
-{
- struct sja1105_bridge_vlan *v;
- int k = *num_retagging;
-
- if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
- return 0;
-
- list_for_each_entry(v, &priv->bridge_vlans, list) {
- int upstream = dsa_upstream_port(priv->ds, v->port);
- int match, subvlan;
- u16 rx_vid;
-
- /* Only sub-VLANs on user ports need to be applied.
- * Bridge VLANs also include VLANs added automatically
- * by DSA on the CPU port.
- */
- if (!dsa_is_user_port(priv->ds, v->port))
- continue;
-
- subvlan = sja1105_find_subvlan(subvlan_map[v->port],
- v->vid);
- if (subvlan < 0) {
- subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
- v->pvid);
- if (subvlan < 0) {
- dev_err(priv->ds->dev, "No more free subvlans\n");
- return -ENOSPC;
- }
- }
-
- rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
-
- /* @v->vid on @v->port needs to be retagged to @rx_vid
- * on @upstream. Assume @v->vid on @v->port and on
- * @upstream was already configured by the previous
- * iteration over bridge_vlans.
- */
- match = rx_vid;
- new_vlan[match].vlanid = rx_vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vmemb_port |= BIT(upstream);
- new_vlan[match].vlan_bc |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(upstream);
- /* The "untagged" flag is set the same as for the
- * original VLAN
- */
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- /* But it's always tagged towards the CPU */
- new_vlan[match].tag_port |= BIT(upstream);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
-
- /* The Retagging Table generates packet *clones* with
- * the new VLAN. This is a very odd hardware quirk
- * which we need to suppress by dropping the original
- * packet.
- * Deny egress of the original VLAN towards the CPU
- * port. This will force the switch to drop it, and
- * we'll see only the retagged packets.
- */
- match = v->vid;
- new_vlan[match].vlan_bc &= ~BIT(upstream);
-
- /* And the retagging itself */
- new_retagging[k].vlan_ing = v->vid;
- new_retagging[k].vlan_egr = rx_vid;
- new_retagging[k].ing_port = BIT(v->port);
- new_retagging[k].egr_port = BIT(upstream);
- if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
- dev_err(priv->ds->dev, "No more retagging rules\n");
- return -ENOSPC;
- }
-
- subvlan_map[v->port][subvlan] = v->vid;
- }
-
- *num_retagging = k;
-
- return 0;
-}
-
-/* Sadly, in crosschip scenarios where the CPU port is also the link to another
- * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
- * the CPU port of neighbour switches.
- */
-static int
-sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int *num_retagging)
-{
- struct sja1105_crosschip_vlan *tmp, *pos;
- struct dsa_8021q_crosschip_link *c;
- struct sja1105_bridge_vlan *v, *w;
- struct list_head crosschip_vlans;
- int k = *num_retagging;
- int rc = 0;
-
- if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
- return 0;
-
- INIT_LIST_HEAD(&crosschip_vlans);
-
- list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
- struct sja1105_private *other_priv = c->other_ctx->ds->priv;
-
- if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- continue;
-
- /* Crosschip links are also added to the CPU ports.
- * Ignore those.
- */
- if (!dsa_is_user_port(priv->ds, c->port))
- continue;
- if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
- continue;
-
- /* Search for VLANs on the remote port */
- list_for_each_entry(v, &other_priv->bridge_vlans, list) {
- bool already_added = false;
- bool we_have_it = false;
-
- if (v->port != c->other_port)
- continue;
-
- /* If @v is a pvid on @other_ds, it does not need
- * re-retagging, because its SVL field is 0 and we
- * already allow that, via the dsa_8021q crosschip
- * links.
- */
- if (v->pvid)
- continue;
-
- /* Search for the VLAN on our local port */
- list_for_each_entry(w, &priv->bridge_vlans, list) {
- if (w->port == c->port && w->vid == v->vid) {
- we_have_it = true;
- break;
- }
- }
-
- if (!we_have_it)
- continue;
-
- list_for_each_entry(tmp, &crosschip_vlans, list) {
- if (tmp->vid == v->vid &&
- tmp->untagged == v->untagged &&
- tmp->port == c->port &&
- tmp->other_port == v->port &&
- tmp->other_ctx == c->other_ctx) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- dev_err(priv->ds->dev, "Failed to allocate memory\n");
- rc = -ENOMEM;
- goto out;
- }
- tmp->vid = v->vid;
- tmp->port = c->port;
- tmp->other_port = v->port;
- tmp->other_ctx = c->other_ctx;
- tmp->untagged = v->untagged;
- list_add(&tmp->list, &crosschip_vlans);
- }
- }
-
- list_for_each_entry(tmp, &crosschip_vlans, list) {
- struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
- int upstream = dsa_upstream_port(priv->ds, tmp->port);
- int match, subvlan;
- u16 rx_vid;
-
- subvlan = sja1105_find_committed_subvlan(other_priv,
- tmp->other_port,
- tmp->vid);
- /* If this happens, it's a bug. The neighbour switch does not
- * have a subvlan for tmp->vid on tmp->other_port, but it
- * should, since we already checked for its vlan_state.
- */
- if (WARN_ON(subvlan < 0)) {
- rc = -EINVAL;
- goto out;
- }
-
- rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
- tmp->other_port,
- subvlan);
-
- /* The @rx_vid retagged from @tmp->vid on
- * {@tmp->other_ds, @tmp->other_port} needs to be
- * re-retagged to @tmp->vid on the way back to us.
- *
- * Assume the original @tmp->vid is already configured
- * on this local switch, otherwise we wouldn't be
- * retagging its subvlan on the other switch in the
- * first place. We just need to add a reverse retagging
- * rule for @rx_vid and install @rx_vid on our ports.
- */
- match = rx_vid;
- new_vlan[match].vlanid = rx_vid;
- new_vlan[match].vmemb_port |= BIT(tmp->port);
- new_vlan[match].vmemb_port |= BIT(upstream);
- /* The "untagged" flag is set the same as for the
- * original VLAN. And towards the CPU, it doesn't
- * really matter, because @rx_vid will only receive
- * traffic on that port. For consistency with other dsa_8021q
- * VLANs, we'll keep the CPU port tagged.
- */
- if (!tmp->untagged)
- new_vlan[match].tag_port |= BIT(tmp->port);
- new_vlan[match].tag_port |= BIT(upstream);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- /* Deny egress of @rx_vid towards our front-panel port.
- * This will force the switch to drop it, and we'll see
- * only the re-retagged packets (having the original,
- * pre-initial-retagging, VLAN @tmp->vid).
- */
- new_vlan[match].vlan_bc &= ~BIT(tmp->port);
-
- /* On reverse retagging, the same ingress VLAN goes to multiple
- * ports. So we have an opportunity to create composite rules
- * to not waste the limited space in the retagging table.
- */
- k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
- upstream, rx_vid, tmp->vid);
- if (k < 0) {
- if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
- dev_err(priv->ds->dev, "No more retagging rules\n");
- rc = -ENOSPC;
- goto out;
- }
- k = (*num_retagging)++;
- }
- /* And the retagging itself */
- new_retagging[k].vlan_ing = rx_vid;
- new_retagging[k].vlan_egr = tmp->vid;
- new_retagging[k].ing_port = BIT(upstream);
- new_retagging[k].egr_port |= BIT(tmp->port);
- }
-
-out:
- list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
- list_del(&tmp->list);
- kfree(tmp);
- }
-
- return rc;
-}
-
-static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
-
-static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
-{
- struct sja1105_crosschip_switch *s, *pos;
- struct list_head crosschip_switches;
- struct dsa_8021q_crosschip_link *c;
- int rc = 0;
-
- INIT_LIST_HEAD(&crosschip_switches);
-
- list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
- bool already_added = false;
-
- list_for_each_entry(s, &crosschip_switches, list) {
- if (s->other_ctx == c->other_ctx) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (!s) {
- dev_err(priv->ds->dev, "Failed to allocate memory\n");
- rc = -ENOMEM;
- goto out;
- }
- s->other_ctx = c->other_ctx;
- list_add(&s->list, &crosschip_switches);
- }
-
- list_for_each_entry(s, &crosschip_switches, list) {
- struct sja1105_private *other_priv = s->other_ctx->ds->priv;
-
- rc = sja1105_build_vlan_table(other_priv, false);
- if (rc)
- goto out;
- }
-
-out:
- list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
- list_del(&s->list);
- kfree(s);
- }
-
- return rc;
-}
-
-static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
-{
- u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
- struct sja1105_retagging_entry *new_retagging;
- struct sja1105_vlan_lookup_entry *new_vlan;
- struct sja1105_table *table;
- int i, num_retagging = 0;
- int rc;
-
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- new_vlan = kcalloc(VLAN_N_VID,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!new_vlan)
- return -ENOMEM;
-
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!new_retagging) {
- kfree(new_vlan);
- return -ENOMEM;
- }
-
- for (i = 0; i < VLAN_N_VID; i++)
- new_vlan[i].vlanid = VLAN_N_VID;
-
- for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
- new_retagging[i].vlan_ing = VLAN_N_VID;
-
- for (i = 0; i < priv->ds->num_ports; i++)
- sja1105_init_subvlan_map(subvlan_map[i]);
-
- /* Bridge VLANs */
- rc = sja1105_build_bridge_vlans(priv, new_vlan);
- if (rc)
- goto out;
-
- /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
- * - RX VLANs
- * - TX VLANs
- * - Crosschip links
- */
- rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
- if (rc)
- goto out;
-
- /* Private VLANs necessary for dsa_8021q operation, which we need to
- * determine on our own:
- * - Sub-VLANs
- * - Sub-VLANs of crosschip switches
- */
- rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
- &num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
- &num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_commit_pvid(priv);
- if (rc)
- goto out;
-
- for (i = 0; i < priv->ds->num_ports; i++)
- sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
-
- if (notify) {
- rc = sja1105_notify_crosschip_switches(priv);
- if (rc)
- goto out;
- }
-
-out:
- kfree(new_vlan);
- kfree(new_retagging);
-
- return rc;
-}
-
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
@@ -2764,10 +2053,8 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
- enum sja1105_vlan_state state;
struct sja1105_table *table;
struct sja1105_rule *rule;
- bool want_tagging;
u16 tpid, tpid2;
int rc;
@@ -2798,19 +2085,10 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
sp->xmit_tpid = ETH_P_SJA1105;
}
- if (!enabled)
- state = SJA1105_VLAN_UNAWARE;
- else if (priv->best_effort_vlan_filtering)
- state = SJA1105_VLAN_BEST_EFFORT;
- else
- state = SJA1105_VLAN_FILTERING_FULL;
-
- if (priv->vlan_state == state)
+ if (priv->vlan_aware == enabled)
return 0;
- priv->vlan_state = state;
- want_tagging = (state == SJA1105_VLAN_UNAWARE ||
- state == SJA1105_VLAN_BEST_EFFORT);
+ priv->vlan_aware = enabled;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
@@ -2824,8 +2102,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- want_tagging = priv->best_effort_vlan_filtering || !enabled;
-
/* VLAN filtering => independent VLAN learning.
* No VLAN filtering (or best effort) => shared VLAN learning.
*
@@ -2846,132 +2122,143 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = want_tagging;
+ l2_lookup_params->shared_learn = !priv->vlan_aware;
- sja1105_frame_memory_partitioning(priv);
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
- rc = sja1105_build_vlan_table(priv, false);
- if (rc)
- return rc;
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+ }
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
- /* Switch port identification based on 802.1Q is only passable
- * if we are not under a vlan_filtering bridge. So make sure
- * the two configurations are mutually exclusive (of course, the
- * user may know better, i.e. best_effort_vlan_filtering).
- */
- return sja1105_setup_8021q_tagging(ds, want_tagging);
+ return rc;
}
-/* Returns number of VLANs added (0 or 1) on success,
- * or a negative error code.
- */
-static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
- u16 flags, struct list_head *vlan_list)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct sja1105_bridge_vlan *v;
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- /* Already added */
- if (v->untagged == untagged && v->pvid == pvid)
- /* Nothing changed */
- return 0;
-
- /* It's the same VLAN, but some of the flags changed
- * and the user did not bother to delete it first.
- * Update it and trigger sja1105_build_vlan_table.
- */
- v->untagged = untagged;
- v->pvid = pvid;
- return 1;
- }
- }
+static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid,
+ u16 flags)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ int match, rc;
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v) {
- dev_err(ds->dev, "Out of memory while storing VLAN\n");
- return -ENOMEM;
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
}
- v->port = port;
- v->vid = vid;
- v->untagged = untagged;
- v->pvid = pvid;
- list_add(&v->list, vlan_list);
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
+
+ vlan[match].type_entry = SJA1110_VLAN_D_TAG;
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc |= BIT(port);
+ vlan[match].vmemb_port |= BIT(port);
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
- return 1;
+ return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], true);
}
-/* Returns number of VLANs deleted (0 or 1) */
-static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
- struct list_head *vlan_list)
+static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid)
{
- struct sja1105_bridge_vlan *v, *n;
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
- list_for_each_entry_safe(v, n, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- list_del(&v->list);
- kfree(v);
- return 1;
- }
- }
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ /* Can't delete a missing entry. */
+ if (match < 0)
+ return 0;
+
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
+
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ /* Also unset tag_port, just so we don't have a confusing bitmap
+ * (no practical purpose).
+ */
+ vlan[match].tag_port &= ~BIT(port);
+
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
return 0;
}
-static int sja1105_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- bool vlan_table_changed = false;
+ u16 flags = vlan->flags;
int rc;
- /* If the user wants best-effort VLAN filtering (aka vlan_filtering
- * bridge plus tagging), be sure to at least deny alterations to the
- * configuration done by dsa_8021q.
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
*/
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
- vid_is_dsa_8021q(vlan->vid)) {
+ if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
"Range 1024-3071 reserved for dsa_8021q operation");
return -EBUSY;
}
- rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
- &priv->bridge_vlans);
- if (rc < 0)
+ /* Always install bridge VLANs as egress-tagged on the CPU port. */
+ if (dsa_is_cpu_port(ds, port))
+ flags = 0;
+
+ rc = sja1105_vlan_add(priv, port, vlan->vid, flags);
+ if (rc)
return rc;
- if (rc > 0)
- vlan_table_changed = true;
- if (!vlan_table_changed)
- return 0;
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ priv->bridge_pvid[port] = vlan->vid;
- return sja1105_build_vlan_table(priv, true);
+ return sja1105_commit_pvid(ds, port);
}
-static int sja1105_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
- bool vlan_table_changed = false;
int rc;
- rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
- if (rc > 0)
- vlan_table_changed = true;
-
- if (!vlan_table_changed)
- return 0;
+ rc = sja1105_vlan_del(priv, port, vlan->vid);
+ if (rc)
+ return rc;
- return sja1105_build_vlan_table(priv, true);
+ /* In case the pvid was deleted, make sure that untagged packets will
+ * be dropped.
+ */
+ return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
@@ -2980,29 +2267,49 @@ static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
struct sja1105_private *priv = ds->priv;
int rc;
- rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
- if (rc <= 0)
+ rc = sja1105_vlan_add(priv, port, vid, flags);
+ if (rc)
return rc;
- return sja1105_build_vlan_table(priv, true);
+ if (flags & BRIDGE_VLAN_INFO_PVID)
+ priv->tag_8021q_pvid[port] = vid;
+
+ return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- int rc;
- rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
- if (!rc)
- return 0;
-
- return sja1105_build_vlan_table(priv, true);
+ return sja1105_vlan_del(priv, port, vid);
}
-static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
- .vlan_add = sja1105_dsa_8021q_vlan_add,
- .vlan_del = sja1105_dsa_8021q_vlan_del,
-};
+static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack = info->info.extack;
+ struct net_device *upper = info->upper_dev;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ if (is_vlan_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported");
+ return -EBUSY;
+ }
+
+ if (netif_is_bridge_master(upper)) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_dev && dp->bridge_dev != upper &&
+ br_vlan_enabled(dp->bridge_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
@@ -3086,24 +2393,21 @@ static int sja1105_setup(struct dsa_switch *ds)
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
*/
ds->vlan_filtering_is_global = true;
+ ds->untag_bridge_pvid = true;
+ /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
+ ds->num_fwd_offloading_bridges = 7;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
ds->mtu_enforcement_ingress = true;
- priv->best_effort_vlan_filtering = true;
-
rc = sja1105_devlink_setup(ds);
if (rc < 0)
goto out_static_config_free;
- /* The DSA/switchdev model brings up switch ports in standalone mode by
- * default, and that means vlan_filtering is 0 since they're not under
- * a bridge, so it's safe to set up switch tagging at this time.
- */
rtnl_lock();
- rc = sja1105_setup_8021q_tagging(ds, true);
+ rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
rtnl_unlock();
if (rc)
goto out_devlink_teardown;
@@ -3125,9 +2429,12 @@ out_static_config_free:
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_bridge_vlan *v, *n;
int port;
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
+
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3143,16 +2450,6 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
-
- list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
- list_del(&v->list);
- kfree(v);
- }
-
- list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
- list_del(&v->list);
- kfree(v);
- }
}
static void sja1105_port_disable(struct dsa_switch *ds, int port)
@@ -3592,8 +2889,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
.port_vlan_filtering = sja1105_vlan_filtering,
- .port_vlan_add = sja1105_vlan_add,
- .port_vlan_del = sja1105_vlan_del,
+ .port_vlan_add = sja1105_bridge_vlan_add,
+ .port_vlan_del = sja1105_bridge_vlan_del,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
@@ -3608,11 +2905,12 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.cls_flower_add = sja1105_cls_flower_add,
.cls_flower_del = sja1105_cls_flower_del,
.cls_flower_stats = sja1105_cls_flower_stats,
- .crosschip_bridge_join = sja1105_crosschip_bridge_join,
- .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
- .devlink_param_get = sja1105_devlink_param_get,
- .devlink_param_set = sja1105_devlink_param_set,
.devlink_info_get = sja1105_devlink_info_get,
+ .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
+ .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
+ .port_prechangeupper = sja1105_prechangeupper,
+ .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
+ .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3756,19 +3054,6 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
- priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
- GFP_KERNEL);
- if (!priv->dsa_8021q_ctx)
- return -ENOMEM;
-
- priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
- priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
- priv->dsa_8021q_ctx->ds = ds;
-
- INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
- INIT_LIST_HEAD(&priv->bridge_vlans);
- INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
-
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -3791,7 +3076,6 @@ static int sja1105_probe(struct spi_device *spi)
struct sja1105_port *sp = &priv->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *slave;
- int subvlan;
if (!dsa_is_user_port(ds, port))
continue;
@@ -3812,9 +3096,6 @@ static int sja1105_probe(struct spi_device *spi)
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- sp->subvlan_map[subvlan] = VLAN_N_VID;
}
return 0;
@@ -3838,8 +3119,10 @@ out_unregister_switch:
static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
+ struct dsa_switch *ds = priv->ds;
+
+ dsa_unregister_switch(ds);
- dsa_unregister_switch(priv->ds);
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f6e13e6c6a18..ec7b65daec20 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -496,14 +496,11 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
int rc;
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
- key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
- priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
- key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -595,14 +592,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
- key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
- priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
- key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 74263f8efe1a..8ef34901c2d8 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -113,6 +113,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/compat.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -131,7 +132,8 @@
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
-static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
#define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
@@ -170,7 +172,7 @@ static const char version[] __initconst =
static const struct net_device_ops eql_netdev_ops = {
.ndo_open = eql_open,
.ndo_stop = eql_close,
- .ndo_do_ioctl = eql_ioctl,
+ .ndo_siocdevprivate = eql_siocdevprivate,
.ndo_start_xmit = eql_slave_xmit,
};
@@ -268,25 +270,29 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
-static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (in_compat_syscall()) /* to be implemented */
+ return -EOPNOTSUPP;
+
switch (cmd) {
case EQL_ENSLAVE:
- return eql_enslave(dev, ifr->ifr_data);
+ return eql_enslave(dev, data);
case EQL_EMANCIPATE:
- return eql_emancipate(dev, ifr->ifr_data);
+ return eql_emancipate(dev, data);
case EQL_GETSLAVECFG:
- return eql_g_slave_cfg(dev, ifr->ifr_data);
+ return eql_g_slave_cfg(dev, data);
case EQL_SETSLAVECFG:
- return eql_s_slave_cfg(dev, ifr->ifr_data);
+ return eql_s_slave_cfg(dev, data);
case EQL_GETMASTRCFG:
- return eql_g_master_cfg(dev, ifr->ifr_data);
+ return eql_g_master_cfg(dev, data);
case EQL_SETMASTRCFG:
- return eql_s_master_cfg(dev, ifr->ifr_data);
+ return eql_s_master_cfg(dev, data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index f66e7fb9a2bb..dd4d3c48b98d 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -252,7 +252,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_start_xmit = el3_start_xmit,
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
- .ndo_do_ioctl = el3_ioctl,
+ .ndo_eth_ioctl = el3_ioctl,
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7d7d3ffe25c3..17c16333a412 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1052,7 +1052,7 @@ static const struct net_device_ops boomrang_netdev_ops = {
.ndo_tx_timeout = vortex_tx_timeout,
.ndo_get_stats = vortex_get_stats,
#ifdef CONFIG_PCI
- .ndo_do_ioctl = vortex_ioctl,
+ .ndo_eth_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
@@ -1069,7 +1069,7 @@ static const struct net_device_ops vortex_netdev_ops = {
.ndo_tx_timeout = vortex_tx_timeout,
.ndo_get_stats = vortex_get_stats,
#ifdef CONFIG_PCI
- .ndo_do_ioctl = vortex_ioctl,
+ .ndo_eth_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 172947fc051a..9595dd1f32ca 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -635,7 +635,7 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
static const struct net_device_ops ax_netdev_ops = {
.ndo_open = ax_open,
.ndo_stop = ax_close,
- .ndo_do_ioctl = ax_ioctl,
+ .ndo_eth_ioctl = ax_ioctl,
.ndo_start_xmit = ax_ei_start_xmit,
.ndo_tx_timeout = ax_ei_tx_timeout,
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 8c321dfc7b3b..3c370e686ec3 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -128,7 +128,7 @@ static inline struct axnet_dev *PRIV(struct net_device *dev)
static const struct net_device_ops axnet_netdev_ops = {
.ndo_open = axnet_open,
.ndo_stop = axnet_close,
- .ndo_do_ioctl = axnet_ioctl,
+ .ndo_eth_ioctl = axnet_ioctl,
.ndo_start_xmit = axnet_start_xmit,
.ndo_tx_timeout = axnet_tx_timeout,
.ndo_get_stats = get_stats,
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index cac036706382..96ad72abd373 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -223,7 +223,7 @@ static const struct net_device_ops pcnet_netdev_ops = {
.ndo_set_config = set_config,
.ndo_start_xmit = ei_start_xmit,
.ndo_get_stats = ei_get_stats,
- .ndo_do_ioctl = ei_ioctl,
+ .ndo_eth_ioctl = ei_ioctl,
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index b8e771c2bc40..c4ecf4fcadf8 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1179,8 +1179,8 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
}
-static int owl_emac_ndo_do_ioctl(struct net_device *netdev,
- struct ifreq *req, int cmd)
+static int owl_emac_ndo_eth_ioctl(struct net_device *netdev,
+ struct ifreq *req, int cmd)
{
if (!netif_running(netdev))
return -EINVAL;
@@ -1224,7 +1224,7 @@ static const struct net_device_ops owl_emac_netdev_ops = {
.ndo_set_rx_mode = owl_emac_ndo_set_rx_mode,
.ndo_set_mac_address = owl_emac_ndo_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = owl_emac_ndo_do_ioctl,
+ .ndo_eth_ioctl = owl_emac_ndo_eth_ioctl,
.ndo_tx_timeout = owl_emac_ndo_tx_timeout,
.ndo_get_stats = owl_emac_ndo_get_stats,
};
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 7965e5e3c985..e0f6cc910bd2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -625,7 +625,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = tx_timeout,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef VLAN_SUPPORT
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 41f8821f792d..920633161174 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3882,7 +3882,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index f99ae317c188..037baea1c738 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -774,7 +774,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9cac5aa75a73..92e4246dc359 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1729,7 +1729,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
.ndo_set_rx_mode = amd8111e_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = amd8111e_set_mac_address,
- .ndo_do_ioctl = amd8111e_ioctl,
+ .ndo_eth_ioctl = amd8111e_ioctl,
.ndo_change_mtu = amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = amd8111e_poll,
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 19e195420e24..9c1636222b99 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1051,7 +1051,7 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
.ndo_set_rx_mode = au1000_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 4100ab07e6b7..70d76fdb9f56 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1572,7 +1572,7 @@ static const struct net_device_ops pcnet32_netdev_ops = {
.ndo_tx_timeout = pcnet32_tx_timeout,
.ndo_get_stats = pcnet32_get_stats,
.ndo_set_rx_mode = pcnet32_set_multicast_list,
- .ndo_do_ioctl = pcnet32_ioctl,
+ .ndo_eth_ioctl = pcnet32_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4f714f874c4f..17a585adfb49 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2284,7 +2284,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_eth_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 4af0cd9530de..e22935ce9573 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -421,7 +421,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
- .ndo_do_ioctl = aq_ndev_ioctl,
+ .ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 67b8113a2b53..38c288ec9059 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -844,7 +844,7 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
.ndo_set_rx_mode = arc_emac_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = arc_emac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1ba81b1eb6fd..02ae98aabf91 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1851,7 +1851,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 11ef1fbe7aee..4ea157efca86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1701,7 +1701,7 @@ static const struct net_device_ops alx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = alx_set_mac_address,
.ndo_change_mtu = alx_change_mtu,
- .ndo_do_ioctl = alx_ioctl,
+ .ndo_eth_ioctl = alx_ioctl,
.ndo_tx_timeout = alx_tx_timeout,
.ndo_fix_features = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1c6246a5dc22..3b51b172b317 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2609,7 +2609,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
.ndo_set_features = atl1c_set_features,
- .ndo_do_ioctl = atl1c_ioctl,
+ .ndo_eth_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2eb0a2ab69f6..753973ac922e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2247,7 +2247,7 @@ static const struct net_device_ops atl1e_netdev_ops = {
.ndo_fix_features = atl1e_fix_features,
.ndo_set_features = atl1e_set_features,
.ndo_change_mtu = atl1e_change_mtu,
- .ndo_do_ioctl = atl1e_ioctl,
+ .ndo_eth_ioctl = atl1e_ioctl,
.ndo_tx_timeout = atl1e_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1e_netpoll,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index c67201a13cf5..68f6c0bbd945 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2885,7 +2885,7 @@ static const struct net_device_ops atl1_netdev_ops = {
.ndo_change_mtu = atl1_change_mtu,
.ndo_fix_features = atlx_fix_features,
.ndo_set_features = atlx_set_features,
- .ndo_do_ioctl = atlx_ioctl,
+ .ndo_eth_ioctl = atlx_ioctl,
.ndo_tx_timeout = atlx_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1_poll_controller,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 0cc0db04c27d..b69298ddb647 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1293,7 +1293,7 @@ static const struct net_device_ops atl2_netdev_ops = {
.ndo_change_mtu = atl2_change_mtu,
.ndo_fix_features = atl2_fix_features,
.ndo_set_features = atl2_set_features,
- .ndo_do_ioctl = atl2_ioctl,
+ .ndo_eth_ioctl = atl2_ioctl,
.ndo_tx_timeout = atl2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl2_poll_controller,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index ad2655efe423..fa784953c601 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2198,7 +2198,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = b44_ioctl,
+ .ndo_eth_ioctl = b44_ioctl,
.ndo_tx_timeout = b44_tx_timeout,
.ndo_change_mtu = b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 977f097fc7bf..d56886300ecf 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1699,7 +1699,7 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_set_mac_address = bcm_enet_set_mac_address,
.ndo_set_rx_mode = bcm_enet_set_multicast_list,
- .ndo_do_ioctl = bcm_enet_ioctl,
+ .ndo_eth_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
};
@@ -2446,7 +2446,7 @@ static const struct net_device_ops bcm_enetsw_ops = {
.ndo_stop = bcm_enetsw_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_change_mtu = bcm_enet_change_mtu,
- .ndo_do_ioctl = bcm_enetsw_ioctl,
+ .ndo_eth_ioctl = bcm_enetsw_ioctl,
};
@@ -2649,7 +2649,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
if (!res_mem || irq_rx < 0)
return -ENODEV;
- ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 075f6e146b29..fe4d99abd548 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1263,7 +1263,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = bgmac_change_mtu,
};
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bee6cfad9fc6..89ee1c0e9c79 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8546,7 +8546,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
.ndo_stop = bnx2_close,
.ndo_get_stats64 = bnx2_get_stats64,
.ndo_set_rx_mode = bnx2_set_rx_mode,
- .ndo_do_ioctl = bnx2_ioctl,
+ .ndo_eth_ioctl = bnx2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnx2_change_mac_addr,
.ndo_change_mtu = bnx2_change_mtu,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2acbc73dcd18..6d98134913cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13048,7 +13048,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = bnx2x_validate_addr,
- .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_eth_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 27943b0446c2..f255fd0b16db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1858,7 +1858,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
int first_queue_query_index, num_queues_req;
- dma_addr_t cur_data_offset;
struct stats_query_entry *cur_query_entry;
u8 stats_count = 0;
bool is_fcoe = false;
@@ -1879,10 +1878,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
first_queue_query_index + num_queues_req);
- cur_data_offset = bp->fw_stats_data_mapping +
- offsetof(struct bnx2x_fw_stats_data, queue_stats) +
- num_queues_req * sizeof(struct per_queue_stats);
-
cur_query_entry = &bp->fw_stats_req->
query[first_queue_query_index + num_queues_req];
@@ -1933,7 +1928,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry->funcID,
j, cur_query_entry->index);
cur_query_entry++;
- cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
/* all stats are coalesced to the leading queue */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 89606587b156..03b821897cf3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -277,6 +277,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
+ ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -2042,6 +2044,19 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
return INVALID_HW_RING_ID;
}
+static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
+{
+ switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
+ netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
+ BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
+ break;
+ default:
+ netdev_err(bp->dev, "FW reported unknown error type\n");
+ break;
+ }
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -2202,6 +2217,14 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
+ bnxt_ptp_pps_event(bp, data1, data2);
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
+ bnxt_event_error_report(bp, data1, data2);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -7495,9 +7518,14 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto no_ptp;
}
- return 0;
+ rc = bnxt_ptp_init(bp);
+ if (!rc)
+ return 0;
+
+ netdev_warn(bp->dev, "PTP initialization failed.\n");
no_ptp:
+ bnxt_ptp_clear(bp);
kfree(ptp);
bp->ptp_cfg = NULL;
return rc;
@@ -7540,6 +7568,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7577,6 +7607,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
__bnxt_hwrm_ptp_qcfg(bp);
} else {
+ bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
}
@@ -10277,15 +10308,9 @@ static int bnxt_open(struct net_device *dev)
if (rc)
return rc;
- if (bnxt_ptp_init(bp)) {
- netdev_warn(dev, "PTP initialization failed.\n");
- kfree(bp->ptp_cfg);
- bp->ptp_cfg = NULL;
- }
rc = __bnxt_open_nic(bp, true, true);
if (rc) {
bnxt_hwrm_if_change(bp, false);
- bnxt_ptp_clear(bp);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10376,7 +10401,6 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- bnxt_ptp_clear(bp);
bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
@@ -11363,7 +11387,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
}
- bnxt_ptp_clear(bp);
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
@@ -11399,13 +11422,20 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 wait_dsecs;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
if (fw_health->master) {
@@ -11461,9 +11491,16 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
if (bp->pf.active_vfs &&
!test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
n = bnxt_get_registered_vfs(bp);
@@ -12135,6 +12172,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
+ bnxt_ptp_reapply_pps(bp);
bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
@@ -12666,7 +12704,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_stop = bnxt_close,
.ndo_get_stats64 = bnxt_get_stats64,
.ndo_set_rx_mode = bnxt_set_rx_mode,
- .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_eth_ioctl = bnxt_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnxt_change_mac_addr,
.ndo_change_mtu = bnxt_change_mtu,
@@ -12705,6 +12743,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
+ bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13317,6 +13356,7 @@ init_err_pci_clean:
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
+ bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bcf8d00b8c80..e379c48c1df9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -496,6 +496,16 @@ struct rx_tpa_end_cmp_ext {
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -1887,6 +1897,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_PTP_PPS 0x10000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index ec381c2423b8..7f55ebbfd04b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -55,16 +55,19 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
}
/* Caller holds ptp_lock */
-static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts)
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 ns;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EIO;
ptp_read_system_prets(sts);
- ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
- return ns;
+ *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ return 0;
}
static void bnxt_ptp_get_current_time(struct bnxt *bp)
@@ -75,7 +78,7 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
return;
spin_lock_bh(&ptp->ptp_lock);
WRITE_ONCE(ptp->old_time, ptp->current_time);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
spin_unlock_bh(&ptp->ptp_lock);
}
@@ -108,9 +111,14 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns, cycles;
+ int rc;
spin_lock_bh(&ptp->ptp_lock);
- cycles = bnxt_refclk_read(ptp->bp, sts);
+ rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
ns = timecounter_cyc2time(&ptp->tc, cycles);
spin_unlock_bh(&ptp->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -147,10 +155,207 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
return rc;
}
-static int bnxt_ptp_enable(struct ptp_clock_info *ptp,
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_event event;
+ u64 ns, pps_ts;
+
+ pps_ts = EVENT_PPS_TS(data2, data1);
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, pps_ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
+ event.pps_times.ts_real = ns_to_timespec64(ns);
+ event.type = PTP_CLOCK_PPSUSR;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL:
+ event.timestamp = ns;
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ }
+
+ ptp_clock_event(bp->ptp_cfg->ptp_clock, &event);
+}
+
+static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
+{
+ struct hwrm_func_ptp_pin_cfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u8 state = usage != BNXT_PPS_PIN_NONE;
+ u8 *pin_state, *pin_usg;
+ u32 enables;
+ int rc;
+
+ if (!TSIO_PIN_VALID(pin)) {
+ netdev_err(ptp->bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n");
+ return -EOPNOTSUPP;
+ }
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1);
+ enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
+ req.enables = cpu_to_le32(enables);
+
+ pin_state = &req.pin0_state;
+ pin_usg = &req.pin0_usage;
+
+ *(pin_state + (pin * 2)) = state;
+ *(pin_usg + (pin * 2)) = usage;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ ptp->pps_info.pins[pin].usage = usage;
+ ptp->pps_info.pins[pin].state = state;
+
+ return 0;
+}
+
+static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
+{
+ struct hwrm_func_ptp_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
+ req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
+ req.ptp_pps_event = event;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+void bnxt_ptp_reapply_pps(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pps *pps;
+ u32 pin = 0;
+ int rc;
+
+ if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) ||
+ !(ptp->ptp_info.pin_config))
+ return;
+ pps = &ptp->pps_info;
+ for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) {
+ if (pps->pins[pin].state) {
+ rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage);
+ if (!rc && pps->pins[pin].event)
+ rc = bnxt_ptp_cfg_event(bp,
+ pps->pins[pin].event);
+ if (rc)
+ netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n",
+ pin);
+ }
+ }
+}
+
+static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
+ u64 *cycles_delta)
+{
+ u64 cycles_now;
+ u64 nsec_now, nsec_delta;
+ int rc;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
+ nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_delta = target_ns - nsec_now;
+ *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
+ return 0;
+}
+
+static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
+ struct ptp_clock_request *rq)
+{
+ struct hwrm_func_ptp_cfg_input req = {0};
+ struct bnxt *bp = ptp->bp;
+ struct timespec64 ts;
+ u64 target_ns, delta;
+ u16 enables;
+ int rc;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ target_ns = timespec64_to_ns(&ts);
+
+ rc = bnxt_get_target_cycles(ptp, target_ns, &delta);
+ if (rc)
+ return rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
+
+ enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
+ req.enables = cpu_to_le16(enables);
+ req.ptp_pps_event = 0;
+ req.ptp_freq_adj_dll_source = 0;
+ req.ptp_freq_adj_dll_phase = 0;
+ req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
+ req.ptp_freq_adj_ext_up = 0;
+ req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
- return -EOPNOTSUPP;
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct bnxt *bp = ptp->bp;
+ u8 pin_id;
+ int rc;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Configure an External PPS IN */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (!on)
+ break;
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL;
+ return rc;
+ case PTP_CLK_REQ_PEROUT:
+ /* Configure a Periodic PPS OUT */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (!on)
+ break;
+
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT);
+ if (!rc)
+ rc = bnxt_ptp_perout_cfg(ptp, rq);
+
+ return rc;
+ case PTP_CLK_REQ_PPS:
+ /* Configure PHC PPS IN */
+ rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL;
+ return rc;
+ default:
+ netdev_err(ptp->bp->dev, "Unrecognized PIN function\n");
+ return -EOPNOTSUPP;
+ }
+
+ return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE);
}
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
@@ -309,8 +514,10 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
static u64 bnxt_cc_read(const struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+ u64 ns = 0;
- return bnxt_refclk_read(ptp->bp, NULL);
+ bnxt_refclk_read(ptp->bp, NULL, &ns);
+ return ns;
}
static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
@@ -408,6 +615,80 @@ static const struct ptp_clock_info bnxt_ptp_caps = {
.enable = bnxt_ptp_enable,
};
+static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ /* Allow only PPS pin function configuration */
+ if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT &&
+ func != PTP_PF_PHYSYNC)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+}
+
+/* bp->hwrm_cmd_lock held by the caller */
+static int bnxt_ptp_pps_init(struct bnxt *bp)
+{
+ struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_ptp_pin_qcfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_info *ptp_info;
+ struct bnxt_pps *pps_info;
+ u8 *pin_usg;
+ u32 i, rc;
+
+ /* Query current/default PIN CFG */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc || !resp->num_pins)
+ return -EOPNOTSUPP;
+
+ ptp_info = &ptp->ptp_info;
+ pps_info = &ptp->pps_info;
+ pps_info->num_pins = resp->num_pins;
+ ptp_info->n_pins = pps_info->num_pins;
+ ptp_info->pin_config = kcalloc(ptp_info->n_pins,
+ sizeof(*ptp_info->pin_config),
+ GFP_KERNEL);
+ if (!ptp_info->pin_config)
+ return -ENOMEM;
+
+ /* Report the TSIO capability to kernel */
+ pin_usg = &resp->pin0_usage;
+ for (i = 0; i < pps_info->num_pins; i++, pin_usg++) {
+ snprintf(ptp_info->pin_config[i].name,
+ sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
+ ptp_info->pin_config[i].index = i;
+ ptp_info->pin_config[i].chan = i;
+ if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
+ ptp_info->pin_config[i].func = PTP_PF_EXTTS;
+ else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
+ ptp_info->pin_config[i].func = PTP_PF_PEROUT;
+ else
+ ptp_info->pin_config[i].func = PTP_PF_NONE;
+
+ pps_info->pins[i].usage = *pin_usg;
+ }
+
+ /* Only 1 each of ext_ts and per_out pins is available in HW */
+ ptp_info->n_ext_ts = 1;
+ ptp_info->n_per_out = 1;
+ ptp_info->pps = 1;
+ ptp_info->verify = bnxt_ptp_verify;
+
+ return 0;
+}
+
+static bool bnxt_pps_config_ok(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
+}
+
int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -420,6 +701,15 @@ int bnxt_ptp_init(struct bnxt *bp)
if (rc)
return rc;
+ if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
+ return 0;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
@@ -433,6 +723,10 @@ int bnxt_ptp_init(struct bnxt *bp)
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
ptp->ptp_info = bnxt_ptp_caps;
+ if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+ if (bnxt_ptp_pps_init(bp))
+ netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n");
+ }
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
if (IS_ERR(ptp->ptp_clock)) {
int err = PTR_ERR(ptp->ptp_clock);
@@ -443,7 +737,7 @@ int bnxt_ptp_init(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
spin_unlock_bh(&ptp->ptp_lock);
ptp_schedule_worker(ptp->ptp_clock, 0);
@@ -462,6 +756,9 @@ void bnxt_ptp_clear(struct bnxt *bp)
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 254ba7bc0f99..cc3cdbaab6cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -21,11 +21,62 @@
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+struct pps_pin {
+ u8 event;
+ u8 usage;
+ u8 state;
+};
+
+#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+
+#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
+ ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
+
+#define EVENT_DATA2_PPS_PIN_NUM(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT)
+
+#define BNXT_DATA2_UPPER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK
+
+#define BNXT_DATA2_UPPER_SFT \
+ (32 - \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT)
+
+#define BNXT_DATA1_LOWER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK
+
+#define BNXT_DATA1_LOWER_SFT \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT
+
+#define EVENT_PPS_TS(data2, data1) \
+ (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\
+ (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT))
+
+#define BNXT_PPS_PIN_DISABLE 0
+#define BNXT_PPS_PIN_ENABLE 1
+#define BNXT_PPS_PIN_NONE 0
+#define BNXT_PPS_PIN_PPS_IN 1
+#define BNXT_PPS_PIN_PPS_OUT 2
+#define BNXT_PPS_PIN_SYNC_IN 3
+#define BNXT_PPS_PIN_SYNC_OUT 4
+
+#define BNXT_PPS_EVENT_INTERNAL 1
+#define BNXT_PPS_EVENT_EXTERNAL 2
+
+struct bnxt_pps {
+ u8 num_pins;
+#define BNXT_MAX_TSIO_PINS 4
+ struct pps_pin pins[BNXT_MAX_TSIO_PINS];
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct cyclecounter cc;
struct timecounter tc;
+ struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
struct sk_buff *tx_skb;
@@ -75,6 +126,8 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index db74241935ab..63e2237e0cb4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3659,7 +3659,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_tx_timeout = bcmgenet_timeout,
.ndo_set_rx_mode = bcmgenet_set_rx_mode,
.ndo_set_mac_address = bcmgenet_set_mac_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5b4568c2ad1c..f38f40eb966e 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2136,7 +2136,7 @@ static const struct net_device_ops sbmac_netdev_ops = {
.ndo_start_xmit = sbmac_start_tx,
.ndo_set_rx_mode = sbmac_set_rx_mode,
.ndo_tx_timeout = sbmac_tx_timeout,
- .ndo_do_ioctl = sbmac_mii_ioctl,
+ .ndo_eth_ioctl = sbmac_mii_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b0e49643f483..6f82eeaa4b9f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14290,7 +14290,7 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = tg3_set_rx_mode,
.ndo_set_mac_address = tg3_set_mac_addr,
- .ndo_do_ioctl = tg3_ioctl,
+ .ndo_eth_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
.ndo_fix_features = tg3_fix_features,
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7d2fe13a52f8..181ebc235925 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3664,7 +3664,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_get_stats = macb_get_stats,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -4323,7 +4323,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 591229b96257..a4a5209a9386 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3223,7 +3223,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_set_vf_mac = liquidio_set_vf_mac,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index ffddb3126a32..3085dd455a17 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1889,7 +1889,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
};
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 48ff6fb0eed9..30463a6d1f8c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1373,7 +1373,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_eth_ioctl = octeon_mgmt_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index e2b290135fd9..efaaa57d4ed5 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2096,7 +2096,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_do_ioctl = nicvf_ioctl,
+ .ndo_eth_ioctl = nicvf_ioctl,
.ndo_set_rx_mode = nicvf_set_rx_mode,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 512da98019c6..e7575d41f4f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -924,7 +924,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_get_stats = t1_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = t1_set_rxmode,
- .ndo_do_ioctl = t1_ioctl,
+ .ndo_eth_ioctl = t1_ioctl,
.ndo_change_mtu = t1_change_mtu,
.ndo_set_mac_address = t1_set_mac_addr,
.ndo_fix_features = t1_fix_features,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 57f210c53afc..72af9d2a00ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2135,13 +2135,18 @@ static int in_range(int val, int lo, int hi)
return val < 0 || (val <= hi && val >= lo);
}
-static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+static int cxgb_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifreq,
+ void __user *useraddr,
+ int cmd)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 cmd;
int ret;
+ if (cmd != SIOCCHIOCTL)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
@@ -2546,8 +2551,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
fallthrough;
case SIOCGMIIPHY:
return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
- case SIOCCHIOCTL:
- return cxgb_extension_ioctl(dev, req->ifr_data);
default:
return -EOPNOTSUPP;
}
@@ -3181,7 +3184,8 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_get_stats = cxgb_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = cxgb_set_rxmode,
- .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_eth_ioctl = cxgb_ioctl,
+ .ndo_siocdevprivate = cxgb_siocdevprivate,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_fix_features = cxgb_fix_features,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dbf9a0e6601d..aa8573202c37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3872,7 +3872,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_eth_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2820a0bb971b..2842628ad2c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2837,7 +2837,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb4vf_do_ioctl,
+ .ndo_eth_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
.ndo_fix_features = cxgb4vf_fix_features,
.ndo_set_features = cxgb4vf_set_features,
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 9f5e5ec69991..072fac5f5d24 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -733,7 +733,7 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_open = ep93xx_open,
.ndo_stop = ep93xx_close,
.ndo_start_xmit = ep93xx_xmit,
- .ndo_do_ioctl = ep93xx_ioctl,
+ .ndo_eth_ioctl = ep93xx_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 2a8bf53c2f75..e842de6f6635 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1372,7 +1372,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
.ndo_start_xmit = dm9000_start_xmit,
.ndo_tx_timeout = dm9000_timeout,
.ndo_set_rx_mode = dm9000_hash_table,
- .ndo_do_ioctl = dm9000_ioctl,
+ .ndo_eth_ioctl = dm9000_ioctl,
.ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index b125d7faefdf..36ab4cbf2ad0 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -443,6 +443,7 @@
=========================================================================
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -902,7 +903,8 @@ static int de4x5_close(struct net_device *dev);
static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
static void set_multicast_list(struct net_device *dev);
-static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
/*
** Private functions
@@ -1084,7 +1086,7 @@ static const struct net_device_ops de4x5_netdev_ops = {
.ndo_start_xmit = de4x5_queue_pkt,
.ndo_get_stats = de4x5_get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = de4x5_ioctl,
+ .ndo_siocdevprivate = de4x5_siocdevprivate,
.ndo_set_mac_address= eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -5357,7 +5359,7 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
** this function is only used for my testing.
*/
static int
-de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct de4x5_private *lp = netdev_priv(dev);
struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
@@ -5371,6 +5373,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
} tmp;
u_long flags = 0;
+ if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch(ioc->cmd) {
case DE4X5_GET_HWADDR: /* Get the hardware address */
ioc->len = ETH_ALEN;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c1dcd6ca1457..fcedd733bacb 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1271,7 +1271,7 @@ static const struct net_device_ops tulip_netdev_ops = {
.ndo_tx_timeout = tulip_tx_timeout,
.ndo_stop = tulip_close,
.ndo_get_stats = tulip_get_stats,
- .ndo_do_ioctl = private_ioctl,
+ .ndo_eth_ioctl = private_ioctl,
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1876f15dd827..85b99099c6b9 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -341,7 +341,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 734acb834c98..202ecb132053 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -95,7 +95,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = set_multicast,
- .ndo_do_ioctl = rio_ioctl,
+ .ndo_eth_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
};
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index ee0ca712dd1c..c36d186dffed 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -479,7 +479,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 48c6eb142dcc..6c51cf991dad 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -742,7 +742,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_stop = dnet_close,
.ndo_get_stats = dnet_get_stats,
.ndo_start_xmit = dnet_start_xmit,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e1b43b07755b..ed1ed48e7483 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1009,7 +1009,7 @@ static const struct ethtool_ops ethoc_ethtool_ops = {
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
- .ndo_do_ioctl = ethoc_ioctl,
+ .ndo_eth_ioctl = ethoc_ioctl,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 11dbbfd38770..ff76e401a014 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1616,7 +1616,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ftgmac100_tx_timeout,
.ndo_set_rx_mode = ftgmac100_set_rx_mode,
.ndo_set_features = ftgmac100_set_features,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 5a1a8f2ea63c..8a341e2d5833 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1043,7 +1043,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_start_xmit = ftmac100_hard_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ftmac100_do_ioctl,
+ .ndo_eth_ioctl = ftmac100_do_ioctl,
};
/******************************************************************************
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 0f141c14d72d..25c91b3c5fd3 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -463,7 +463,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = mii_ioctl,
+ .ndo_eth_ioctl = mii_ioctl,
.ndo_tx_timeout = fealnx_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e6826561cf11..685d2d8a3b36 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3157,7 +3157,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
- .ndo_do_ioctl = dpaa_ioctl,
+ .ndo_eth_ioctl = dpaa_ioctl,
.ndo_setup_tc = dpaa_setup_tc,
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 973352393bd4..f664021c3ad1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2594,7 +2594,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_get_stats64 = dpaa2_eth_get_stats,
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
- .ndo_do_ioctl = dpaa2_eth_ioctl,
+ .ndo_eth_ioctl = dpaa2_eth_ioctl,
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index f9451ec5f2cb..d6eefbbf163f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -111,11 +111,11 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
return 0;
}
-int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct ethsw_core *ethsw = filter_block->ethsw;
struct dpsw_acl_key *acl_key = &entry->key;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
@@ -136,7 +136,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, acl_entry_cfg);
+ filter_block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
DMA_TO_DEVICE);
@@ -150,12 +150,13 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
-static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct dpaa2_switch_acl_entry *entry)
+static int
+dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
struct dpsw_acl_key *acl_key = &entry->key;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct ethsw_core *ethsw = block->ethsw;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
int err;
@@ -175,7 +176,7 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, acl_entry_cfg);
+ block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
DMA_TO_DEVICE);
@@ -190,19 +191,19 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
struct list_head *pos, *n;
int index = 0;
- if (list_empty(&acl_tbl->entries)) {
- list_add(&entry->list, &acl_tbl->entries);
+ if (list_empty(&block->acl_entries)) {
+ list_add(&entry->list, &block->acl_entries);
return index;
}
- list_for_each_safe(pos, n, &acl_tbl->entries) {
+ list_for_each_safe(pos, n, &block->acl_entries) {
tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
if (entry->prio < tmp->prio)
break;
@@ -213,13 +214,13 @@ dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static struct dpaa2_switch_acl_entry*
-dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
int index)
{
struct dpaa2_switch_acl_entry *tmp;
int i = 0;
- list_for_each_entry(tmp, &acl_tbl->entries, list) {
+ list_for_each_entry(tmp, &block->acl_entries, list) {
if (i == index)
return tmp;
++i;
@@ -229,37 +230,38 @@ dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry,
int precedence)
{
int err;
- err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
entry->cfg.precedence = precedence;
- return dpaa2_switch_acl_entry_add(acl_tbl, entry);
+ return dpaa2_switch_acl_entry_add(block, entry);
}
-static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct dpaa2_switch_acl_entry *entry)
+static int
+dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
/* Add the new ACL entry to the linked list and get its index */
- index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
+ index = dpaa2_switch_acl_entry_add_to_list(block, entry);
/* Move up in priority the ACL entries to make space
* for the new filter.
*/
- precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
for (i = 0; i < index; i++) {
- tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
- err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -269,19 +271,19 @@ static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Add the new entry to hardware */
entry->cfg.precedence = precedence;
- err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
- acl_tbl->num_rules++;
+ err = dpaa2_switch_acl_entry_add(block, entry);
+ block->num_acl_rules++;
return err;
}
static struct dpaa2_switch_acl_entry *
-dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
unsigned long cookie)
{
struct dpaa2_switch_acl_entry *tmp, *n;
- list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == cookie)
return tmp;
}
@@ -289,13 +291,13 @@ dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp, *n;
int index = 0;
- list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == entry->cookie)
return index;
index++;
@@ -303,21 +305,34 @@ dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOENT;
}
+static struct dpaa2_switch_mirror_entry *
+dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_mirror_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
static int
-dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
- index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
+ index = dpaa2_switch_acl_entry_get_index(block, entry);
/* Remove from hardware the ACL entry */
- err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
- acl_tbl->num_rules--;
+ block->num_acl_rules--;
/* Remove it from the list also */
list_del(&entry->list);
@@ -325,8 +340,8 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Move down in priority the entries over the deleted one */
precedence = entry->cfg.precedence;
for (i = index - 1; i >= 0; i--) {
- tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
- err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -339,10 +354,10 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
-static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
- struct flow_action_entry *cls_act,
- struct dpsw_acl_result *dpsw_act,
- struct netlink_ext_ack *extack)
+static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -374,22 +389,110 @@ out:
return err;
}
-int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+static int
+dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry,
+ u16 to, struct netlink_ext_ack *extack)
+{
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct ethsw_port_priv *port_priv;
+ unsigned long ports_added = 0;
+ u16 vlan = entry->cfg.vlan_id;
+ bool mirror_port_enabled;
+ int err, port;
+
+ /* Setup the mirroring port */
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ if (!mirror_port_enabled) {
+ err = dpsw_set_reflection_if(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, to);
+ if (err)
+ return err;
+ ethsw->mirror_port = to;
+ }
+
+ /* Setup the same egress mirroring configuration on all the switch
+ * ports that share the same filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
+ port_priv = ethsw->ports[port];
+
+ /* We cannot add a per VLAN mirroring rule if the VLAN in
+ * question is not installed on the switch port.
+ */
+ if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
+ NL_SET_ERR_MSG(extack,
+ "VLAN must be installed on the switch port");
+ err = -EINVAL;
+ goto err_remove_filters;
+ }
+
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port, &entry->cfg);
+ if (err)
+ goto err_remove_filters;
+
+ ports_added |= BIT(port);
+ }
+
+ list_add(&entry->list, &block->mirror_entries);
+
+ return 0;
+
+err_remove_filters:
+ for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, &entry->cfg);
+ }
+
+ if (!mirror_port_enabled)
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return err;
+}
+
+static int
+dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry)
+{
+ struct dpsw_reflection_cfg *cfg = &entry->cfg;
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ int port;
+
+ /* Remove this mirroring configuration from all the ports belonging to
+ * the filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, cfg);
+
+ /* Also remove it from the list of mirror filters */
+ list_del(&entry->list);
+ kfree(entry);
+
+ /* If this was the last mirror filter, then unset the mirror port */
+ if (list_empty(&block->mirror_entries))
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
struct dpaa2_switch_acl_entry *acl_entry;
+ struct ethsw_core *ethsw = block->ethsw;
struct flow_action_entry *act;
int err;
- if (!flow_offload_has_one_action(&rule->action)) {
- NL_SET_ERR_MSG(extack, "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -403,15 +506,15 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
goto free_acl_entry;
act = &rule->action.entries[0];
- err = dpaa2_switch_tc_parse_action(ethsw, act,
- &acl_entry->cfg.result, extack);
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
- err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -423,33 +526,171 @@ free_acl_entry:
return err;
}
-int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct flow_cls_offload *cls)
+static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
+ u16 *vlan)
{
- struct dpaa2_switch_acl_entry *entry;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring is supported only per VLAN");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
- entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
- if (!entry)
- return 0;
+ if (match.mask->vlan_priority != 0 ||
+ match.mask->vlan_dei != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on VLAN ID supported");
+ return -EOPNOTSUPP;
+ }
- return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+ if (match.mask->vlan_id != 0xFFF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *vlan = (u16)match.key->vlan_id;
+ }
+
+ return 0;
}
-int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct tc_cls_matchall_offload *cls)
+static int
+dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
- struct dpaa2_switch_acl_entry *acl_entry;
- struct flow_action_entry *act;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id, vlan;
int err;
- if (!flow_offload_has_one_action(&cls->rule->action)) {
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Parse the key */
+ err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
+ if (err)
+ return err;
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ tmp->cfg.vlan_id == vlan) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
+ mirror_entry->cfg.vlan_id = vlan;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
NL_SET_ERR_MSG(extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
- if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ act = &rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_flower_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_flower_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+ /* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -459,15 +700,15 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOMEM;
act = &cls->rule->action.entries[0];
- err = dpaa2_switch_tc_parse_action(ethsw, act,
- &acl_entry->cfg.result, extack);
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
- err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -479,14 +720,159 @@ free_acl_entry:
return err;
}
-int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+static int
+dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration. One matchall rule per block is the maximum.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matchall mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &cls->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_matchall_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_add;
+ }
+
+ return 0;
+
+unwind_add:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_remove;
+ }
+
+ return 0;
+
+unwind_remove:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
- struct dpaa2_switch_acl_entry *entry;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+ /* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block,
+ acl_entry);
- entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
- if (!entry)
- return 0;
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
- return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+ return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 68b78642c045..71129724d9ca 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -41,14 +41,14 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e
return NULL;
}
-static struct dpaa2_switch_acl_tbl *
-dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
+static struct dpaa2_switch_filter_block *
+dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
int i;
for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (!ethsw->acls[i].in_use)
- return &ethsw->acls[i];
+ if (!ethsw->filter_blocks[i].in_use)
+ return &ethsw->filter_blocks[i];
return NULL;
}
@@ -1127,28 +1127,28 @@ err_exit:
}
static int
-dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
- return dpaa2_switch_cls_flower_replace(acl_tbl, f);
+ return dpaa2_switch_cls_flower_replace(filter_block, f);
case FLOW_CLS_DESTROY:
- return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
+ return dpaa2_switch_cls_flower_destroy(filter_block, f);
default:
return -EOPNOTSUPP;
}
}
static int
-dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *f)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
+ return dpaa2_switch_cls_matchall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
+ return dpaa2_switch_cls_matchall_destroy(block, f);
default:
return -EOPNOTSUPP;
}
@@ -1170,106 +1170,122 @@ static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
static LIST_HEAD(dpaa2_switch_block_cb_list);
-static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+static int
+dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
- if (port_priv->acl_tbl)
+ if (port_priv->filter_block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, &acl_if_cfg);
+ block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
return err;
}
- acl_tbl->ports |= BIT(port_priv->idx);
- port_priv->acl_tbl = acl_tbl;
+ block->ports |= BIT(port_priv->idx);
+ port_priv->filter_block = block;
return 0;
}
static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
- if (port_priv->acl_tbl != acl_tbl)
+ if (port_priv->filter_block != block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, &acl_if_cfg);
+ block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
return err;
}
- acl_tbl->ports &= ~BIT(port_priv->idx);
- port_priv->acl_tbl = NULL;
+ block->ports &= ~BIT(port_priv->idx);
+ port_priv->filter_block = NULL;
return 0;
}
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+ struct dpaa2_switch_filter_block *block)
{
- struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
+ struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
int err;
+ /* Offload all the mirror entries found in the block on this new port
+ * joining it.
+ */
+ err = dpaa2_switch_block_offload_mirror(block, port_priv);
+ if (err)
+ return err;
+
/* If the port is already bound to this ACL table then do nothing. This
* can happen when this port is the first one to join a tc block
*/
- if (port_priv->acl_tbl == acl_tbl)
+ if (port_priv->filter_block == block)
return 0;
- err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
if (err)
return err;
/* Mark the previous ACL table as being unused if this was the last
* port that was using it.
*/
- if (old_acl_tbl->ports == 0)
- old_acl_tbl->in_use = false;
+ if (old_block->ports == 0)
+ old_block->in_use = false;
- return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}
-static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+static int
+dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *new_acl_tbl;
+ struct dpaa2_switch_filter_block *new_block;
int err;
+ /* Unoffload all the mirror entries found in the block from the
+ * port leaving it.
+ */
+ err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
+ if (err)
+ return err;
+
/* We are the last port that leaves a block (an ACL table).
* We'll continue to use this table.
*/
- if (acl_tbl->ports == BIT(port_priv->idx))
+ if (block->ports == BIT(port_priv->idx))
return 0;
- err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
if (err)
return err;
- if (acl_tbl->ports == 0)
- acl_tbl->in_use = false;
+ if (block->ports == 0)
+ block->in_use = false;
- new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
- new_acl_tbl->in_use = true;
- return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
+ new_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ new_block->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}
static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
@@ -1277,7 +1293,7 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
bool register_block = false;
int err;
@@ -1287,24 +1303,24 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
ethsw);
if (!block_cb) {
- /* If the ACL table is not already known, then this port must
- * be the first to join it. In this case, we can just continue
- * to use our private table
+ /* If the filter block is not already known, then this port
+ * must be the first to join it. In this case, we can just
+ * continue to use our private table
*/
- acl_tbl = port_priv->acl_tbl;
+ filter_block = port_priv->filter_block;
block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
- ethsw, acl_tbl, NULL);
+ ethsw, filter_block, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
register_block = true;
} else {
- acl_tbl = flow_block_cb_priv(block_cb);
+ filter_block = flow_block_cb_priv(block_cb);
}
flow_block_cb_incref(block_cb);
- err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_block_bind(port_priv, filter_block);
if (err)
goto err_block_bind;
@@ -1327,7 +1343,7 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
int err;
@@ -1337,8 +1353,8 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
if (!block_cb)
return;
- acl_tbl = flow_block_cb_priv(block_cb);
- err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
+ filter_block = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
@@ -1889,8 +1905,12 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
+static struct notifier_block dpaa2_switch_port_switchdev_nb;
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
+
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
- struct net_device *upper_dev)
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -1906,8 +1926,8 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
other_port_priv = netdev_priv(other_dev);
if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- netdev_err(netdev,
- "Interface from a different DPSW is in the bridge already!\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
return -EINVAL;
}
}
@@ -1929,8 +1949,16 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
if (err)
goto err_egress_flood;
+ err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+ &dpaa2_switch_port_switchdev_nb,
+ &dpaa2_switch_port_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
return 0;
+err_switchdev_offload:
err_egress_flood:
dpaa2_switch_port_set_fdb(port_priv, NULL);
return err;
@@ -1956,6 +1984,13 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}
+static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+{
+ switchdev_bridge_port_unoffload(netdev, NULL,
+ &dpaa2_switch_port_switchdev_nb,
+ &dpaa2_switch_port_switchdev_blocking_nb);
+}
+
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
@@ -2029,6 +2064,28 @@ static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *net
return 0;
}
+static int
+dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2049,25 +2106,23 @@ static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
if (!netif_is_bridge_master(upper_dev))
break;
- if (!br_vlan_enabled(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
- err = -EOPNOTSUPP;
+ err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
+ upper_dev,
+ extack);
+ if (err)
goto out;
- }
- err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Cannot join a bridge while VLAN uppers are present");
- goto out;
- }
+ if (!info->linking)
+ dpaa2_switch_port_pre_bridge_leave(netdev);
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
+ err = dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
else
err = dpaa2_switch_port_bridge_leave(netdev);
}
@@ -2952,7 +3007,7 @@ static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
acl_entry.cfg.precedence = 0;
acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
- return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
+ return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
}
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
@@ -2965,7 +3020,7 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
};
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct dpsw_fdb_cfg fdb_cfg = {0};
struct dpsw_if_attr dpsw_if_attr;
struct dpaa2_switch_fdb *fdb;
@@ -3020,14 +3075,15 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
- acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
- acl_tbl->ethsw = ethsw;
- acl_tbl->id = acl_tbl_id;
- acl_tbl->in_use = true;
- acl_tbl->num_rules = 0;
- INIT_LIST_HEAD(&acl_tbl->entries);
+ filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ filter_block->ethsw = ethsw;
+ filter_block->acl_id = acl_tbl_id;
+ filter_block->in_use = true;
+ filter_block->num_acl_rules = 0;
+ INIT_LIST_HEAD(&filter_block->acl_entries);
+ INIT_LIST_HEAD(&filter_block->mirror_entries);
- err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
if (err)
return err;
@@ -3081,7 +3137,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
}
kfree(ethsw->fdbs);
- kfree(ethsw->acls);
+ kfree(ethsw->filter_blocks);
kfree(ethsw->ports);
dpaa2_switch_takedown(sw_dev);
@@ -3209,9 +3265,10 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
goto err_free_ports;
}
- ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
- GFP_KERNEL);
- if (!ethsw->acls) {
+ ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
+ sizeof(*ethsw->filter_blocks),
+ GFP_KERNEL);
+ if (!ethsw->filter_blocks) {
err = -ENOMEM;
goto err_free_fdbs;
}
@@ -3242,6 +3299,11 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
if (err)
goto err_stop;
+ /* By convention, if the mirror port is equal to the number of switch
+ * interfaces, then mirroring of any kind is disabled.
+ */
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
/* Register the netdev only when the entire setup is done and the
* switch port interfaces are ready to receive traffic
*/
@@ -3264,7 +3326,7 @@ err_stop:
err_free_netdev:
for (i--; i >= 0; i--)
free_netdev(ethsw->ports[i]->netdev);
- kfree(ethsw->acls);
+ kfree(ethsw->filter_blocks);
err_free_fdbs:
kfree(ethsw->fdbs);
err_free_ports:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
index bdef71f234cb..f69d940f3c5b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -113,20 +113,29 @@ struct dpaa2_switch_acl_entry {
struct dpsw_acl_key key;
};
-struct dpaa2_switch_acl_tbl {
- struct list_head entries;
+struct dpaa2_switch_mirror_entry {
+ struct list_head list;
+ struct dpsw_reflection_cfg cfg;
+ unsigned long cookie;
+ u16 if_id;
+};
+
+struct dpaa2_switch_filter_block {
struct ethsw_core *ethsw;
u64 ports;
-
- u16 id;
- u8 num_rules;
bool in_use;
+
+ struct list_head acl_entries;
+ u16 acl_id;
+ u8 num_acl_rules;
+
+ struct list_head mirror_entries;
};
static inline bool
-dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
{
- if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+ if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
return true;
return false;
@@ -149,7 +158,7 @@ struct ethsw_port_priv {
bool ucast_flood;
bool learn_ena;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
};
/* Switch data */
@@ -175,7 +184,8 @@ struct ethsw_core {
int napi_users;
struct dpaa2_switch_fdb *fdbs;
- struct dpaa2_switch_acl_tbl *acls;
+ struct dpaa2_switch_filter_block *filter_blocks;
+ u16 mirror_port;
};
static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
@@ -229,18 +239,24 @@ typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
/* TC offload */
-int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
-int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
-int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
-int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
-int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry);
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
index cb13e740f72b..397d55f2bd99 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -39,11 +39,16 @@
#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
+
#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
+
#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
@@ -533,5 +538,19 @@ struct dpsw_cmd_acl_entry {
__le64 pad2[4];
__le64 key_iova;
};
+
+struct dpsw_cmd_set_reflection_if {
+ __le16 if_id;
+};
+
+#define DPSW_FILTER_SHIFT 0
+#define DPSW_FILTER_SIZE 2
+
+struct dpsw_cmd_if_reflection {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only 2 bits from the LSB */
+ u8 filter;
+};
#pragma pack(pop)
#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
index 6352d6d1ecba..ab921d75deb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -1579,3 +1579,83 @@ int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpsw_set_reflection_if() - Set target interface for mirrored traffic
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Id
+ *
+ * Only one mirroring destination is allowed per switch
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id)
+{
+ struct dpsw_cmd_set_reflection_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_add_reflection() - Setup mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_remove_reflection() - Remove mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
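A rough sketch of how the two new MC commands are combined when a VLAN mirroring rule is installed, assuming the caller follows the pattern of the dpaa2_switch_block_add_mirror() helper referenced earlier in this patch: set the single reflection interface once, then add a per-port reflection rule on each source port. The function name below is hypothetical and error unwinding is omitted.

static int example_add_vlan_mirror(struct ethsw_core *ethsw, u16 mirror_if,
				   u16 src_if, u16 vlan)
{
	struct dpsw_reflection_cfg cfg = {
		.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN,
		.vlan_id = vlan,
	};
	int err;

	/* Only one mirroring destination is allowed per switch. */
	err = dpsw_set_reflection_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     mirror_if);
	if (err)
		return err;

	/* Mirror frames received on src_if that belong to the given VLAN. */
	return dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				      src_if, &cfg);
}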
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
index 5ef221a25b02..892df905b876 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -752,4 +752,35 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+/**
+ * enum dpsw_reflection_filter - Filter type for frames to be reflected
+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
+ * the particular VLAN defined by the vid parameter
+ */
+enum dpsw_reflection_filter {
+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
+};
+
+/**
+ * struct dpsw_reflection_cfg - Structure representing the mirroring config
+ * @filter: Filter type for frames to be mirrored
+ * @vlan_id: VLAN ID to mirror; valid only when @filter is DPSW_REFLECTION_FILTER_INGRESS_VLAN
+ */
+struct dpsw_reflection_cfg {
+ enum dpsw_reflection_filter filter;
+ u16 vlan_id;
+};
+
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id);
+
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
+
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
#endif /* __FSL_DPSW_H */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c84f6c226743..60d94e0a07d6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -735,7 +735,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
- .ndo_do_ioctl = enetc_ioctl,
+ .ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 03090ba7e226..1a9d1e8b772c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -99,7 +99,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
- .ndo_do_ioctl = enetc_ioctl,
+ .ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
};
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2e002e4b4b4a..ae3259164395 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -77,6 +77,8 @@
#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
+#define FEC_LPI_SLEEP 0x1f4 /* Set IEEE802.3az LPI Sleep Ts time */
+#define FEC_LPI_WAKE 0x1f8 /* Set IEEE802.3az LPI Wake Tw time */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -379,6 +381,9 @@ struct bufdesc_ex {
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+#define FEC_ENET_TXC_DLY ((uint)0x00010000)
+#define FEC_ENET_RXC_DLY ((uint)0x00020000)
+
/* ENET interrupt coalescing macro define */
#define FEC_ITR_CLK_SEL (0x1 << 30)
#define FEC_ITR_EN (0x1 << 31)
@@ -472,6 +477,19 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19)
+/* The i.MX8MQ ENET IP adds support for the IEEE 802.3az EEE standard.
+ * For transmission, the MAC supplies two user registers to set the
+ * Sleep (TS) and Wake (TW) times.
+ */
+#define FEC_QUIRK_HAS_EEE (1 << 20)
+
+/* The i.MX8QM ENET IP adds a feature to generate delayed TXC/RXC as an
+ * alternative option, to make sure it works well with various PHYs.
+ * To implement the delayed clock, the ENET takes synchronized 250MHz
+ * clocks to generate the 2ns delay.
+ */
+#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 21)
+
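Presumably the 2ns figure follows from the 250MHz clock mentioned above: one period at 250MHz is 1 / 250MHz = 4ns, so shifting TXC/RXC by half a cycle yields the 2ns delay. This reading is an assumption; the comment does not spell out the mechanism.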
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -528,6 +546,7 @@ struct fec_enet_private {
struct clk *clk_ref;
struct clk *clk_enet_out;
struct clk *clk_ptp;
+ struct clk *clk_2x_txclk;
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
@@ -550,6 +569,8 @@ struct fec_enet_private {
uint phy_speed;
phy_interface_t phy_interface;
struct device_node *phy_node;
+ bool rgmii_txc_dly;
+ bool rgmii_rxc_dly;
int link;
int full_duplex;
int speed;
@@ -589,6 +610,10 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
+ /* tx lpi eee mode */
+ struct ethtool_eee eee;
+ unsigned int clk_ref_rate;
+
u32 rx_copybreak;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8aea707a65a7..40ea318d7396 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -135,6 +135,26 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
+static const struct fec_devinfo fec_imx8mq_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_EEE,
+};
+
+static const struct fec_devinfo fec_imx8qm_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -162,6 +182,12 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6ul-fec",
.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
+ .name = "imx8mq-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
+ }, {
+ .name = "imx8qm-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
+ }, {
/* sentinel */
}
};
@@ -175,6 +201,8 @@ enum imx_fec_type {
MVF600_FEC,
IMX6SX_FEC,
IMX6UL_FEC,
+ IMX8MQ_FEC,
+ IMX8QM_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -185,6 +213,8 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
+ { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
+ { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -1107,6 +1137,13 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
ecntl |= (1 << 4);
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_txc_dly)
+ ecntl |= FEC_ENET_TXC_DLY;
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_rxc_dly)
+ ecntl |= FEC_ENET_RXC_DLY;
+
#ifndef CONFIG_M5272
/* Enable the MIB statistic event counters */
writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -1970,6 +2007,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
+ ret = clk_prepare_enable(fep->clk_2x_txclk);
+ if (ret)
+ goto failed_clk_2x_txclk;
+
fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
@@ -1980,10 +2021,14 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
mutex_unlock(&fep->ptp_clk_mutex);
}
clk_disable_unprepare(fep->clk_ref);
+ clk_disable_unprepare(fep->clk_2x_txclk);
}
return 0;
+failed_clk_2x_txclk:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
failed_clk_ref:
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -2692,6 +2737,92 @@ static int fec_enet_set_tunable(struct net_device *netdev,
return ret;
}
+/* The LPI Sleep Ts count is based on the tx clock (clk_ref).
+ * The LPI sleep count value = X us / cycle_ns.
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
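A worked example of this conversion, with assumed values:

/* Example (assumed values): with clk_ref running at 125 MHz and
 * tx_lpi_timer = 1000 us, fec_enet_us_to_tx_cycle() returns
 * 1000 * (125000000 / 1000) / 1000 = 125000 ref-clock cycles, which is
 * the value written to FEC_LPI_SLEEP and FEC_LPI_WAKE below.
 */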
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+ int ret = 0;
+
+ if (enable) {
+ ret = phy_init_eee(ndev->phydev, 0);
+ if (ret)
+ return ret;
+
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ p->tx_lpi_enabled = enable;
+ p->eee_enabled = enable;
+ p->eee_active = enable;
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
+static int
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ edata->eee_enabled = p->eee_enabled;
+ edata->eee_active = p->eee_active;
+ edata->tx_lpi_timer = p->tx_lpi_timer;
+ edata->tx_lpi_enabled = p->tx_lpi_enabled;
+
+ return phy_ethtool_get_eee(ndev->phydev, edata);
+}
+
+static int
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ int ret = 0;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ p->tx_lpi_timer = edata->tx_lpi_timer;
+
+ if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
+ !edata->tx_lpi_timer)
+ ret = fec_enet_eee_mode_set(ndev, false);
+ else
+ ret = fec_enet_eee_mode_set(ndev, true);
+
+ if (ret)
+ return ret;
+
+ return phy_ethtool_set_eee(ndev->phydev, edata);
+}
+
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
@@ -2752,6 +2883,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
+ .get_eee = fec_enet_get_eee,
+ .set_eee = fec_enet_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.self_test = net_selftest,
@@ -3280,7 +3413,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_do_ioctl = fec_enet_ioctl,
+ .ndo_eth_ioctl = fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fec_poll_controller,
#endif
@@ -3586,6 +3719,7 @@ fec_probe(struct platform_device *pdev)
char irq_name[8];
int irq_cnt;
struct fec_devinfo *dev_info;
+ u32 rgmii_delay;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3643,6 +3777,12 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_stop_mode;
+ /* For rgmii internal delay, valid values are 0ps and 2000ps */
+ if (of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_delay))
+ fep->rgmii_txc_dly = true;
+ if (of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_delay))
+ fep->rgmii_rxc_dly = true;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3692,6 +3832,12 @@ fec_probe(struct platform_device *pdev)
fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
if (IS_ERR(fep->clk_ref))
fep->clk_ref = NULL;
+ fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
+
+ /* clk_2x_txclk is optional, depends on board */
+ fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
+ if (IS_ERR(fep->clk_2x_txclk))
+ fep->clk_2x_txclk = NULL;
fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 02c47658a215..73ff359a15f1 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -792,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6ee325ad35c5..2db6e38a772e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -900,7 +900,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9646483137c4..af6ad94bf24a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3184,7 +3184,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_do_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = gfar_ioctl,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0acfafb73db1..3eb288d10b0c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3516,7 +3516,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
- .ndo_do_ioctl = ucc_geth_ioctl,
+ .ndo_eth_ioctl = ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ucc_netpoll,
#endif
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 5bb56b454541..f089d33dd48e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -322,7 +322,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
+ (tail & priv->adminq_mask)) {
int err;
// Flush existing commands to make room.
@@ -332,7 +333,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
// Retry.
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
+ (tail & priv->adminq_mask)) {
// This should never happen. We just flushed the
// command queue so there should be enough space.
return -ENOMEM;
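Why the tail must be masked can be seen with assumed numbers:

/* Illustration (assumed values): with adminq_mask = 63 (a 64-entry queue),
 * adminq_prod_cnt = 190 and a free-running event counter tail = 127, the
 * old check compared (190 + 1) & 63 == 63 against the raw value 127 and
 * never matched, so a full queue went undetected once tail exceeded the
 * mask.  With the fix, 127 & 63 == 63 and the overflow is caught.
 */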
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index bb062b02fb85..094e4a37a295 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -90,6 +90,7 @@ config HNS_ENET
config HNS3
tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
depends on PCI
+ select NET_DEVLINK
help
This selects the framework support for Hisilicon Network Subsystem 3.
This layer facilitates clients like ENET, RoCE and user-space ethernet
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 3c4db4a6b431..22bf914f2dbd 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -685,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_open = hisi_femac_net_open,
.ndo_stop = hisi_femac_net_close,
.ndo_start_xmit = hisi_femac_net_xmit,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ad534f9e41ab..343c605c4be8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1945,7 +1945,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cdb5f14fb6bc..cb8d5da3654f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2852,7 +2852,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_start_xmit = hns3_nic_net_xmit,
.ndo_tx_timeout = hns3_nic_net_timeout,
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
- .ndo_do_ioctl = hns3_nic_do_ioctl,
+ .ndo_eth_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
.ndo_features_check = hns3_features_check,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index a685392dbfe9..d1bf5c4c0abb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
new file mode 100644
index 000000000000..06d29945d4e1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclge_devlink.h"
+
+static int hclge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGE_DEVLINK_FW_STRING_LEN 32
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclge_devlink_ops = {
+ .info_get = hclge_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclge_devlink_reload_down,
+ .reload_up = hclge_devlink_reload_up,
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink = devlink_alloc(&hclge_devlink_ops,
+ sizeof(struct hclge_devlink_priv));
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+
+ ret = devlink_register(devlink, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ hdev->devlink = devlink;
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclge_devlink_uninit(struct hclge_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ if (!devlink)
+ return;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+
+ hdev->devlink = NULL;
+}
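Assuming the HNAE3_FW_VERSION_BYTE* masks select bits 31:24 down to 7:0 of fw_version, as their names suggest, the string built in hclge_devlink_info_get() is simply the four bytes printed most-significant first:

/* Example (assumed layout): fw_version = 0x01020304 is reported to
 * "devlink dev info" as running firmware version "1.2.3.4".
 */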
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
new file mode 100644
index 000000000000..918be04507a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEVLINK_H
+#define __HCLGE_DEVLINK_H
+
+#include "hclge_main.h"
+
+struct hclge_devlink_priv {
+ struct hclge_dev *hdev;
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev);
+void hclge_devlink_uninit(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ebeaf12e409b..f15d76ec0068 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -23,6 +23,7 @@
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
+#include "hclge_devlink.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
@@ -11482,10 +11483,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_pci_uninit;
+
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret)
- goto err_pci_uninit;
+ goto err_devlink_uninit;
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -11658,6 +11663,8 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_cmd_uninit(hdev);
+err_devlink_uninit:
+ hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev);
@@ -12048,6 +12055,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
+ hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 3d3352491dba..cc31b12904ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,6 +8,7 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+#include <net/devlink.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -943,6 +944,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
+ struct devlink *devlink;
};
/* VPort level vlan tag configuration for TX direction */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 2c26ea607a53..51ff7d86ee90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -7,4 +7,4 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
new file mode 100644
index 000000000000..21a45279fd99
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclgevf_devlink.h"
+
+static int hclgevf_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGEVF_DEVLINK_FW_STRING_LEN 32
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
+ struct hclgevf_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclgevf_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclgevf_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclgevf_devlink_ops = {
+ .info_get = hclgevf_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclgevf_devlink_reload_down,
+ .reload_up = hclgevf_devlink_reload_up,
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink = devlink_alloc(&hclgevf_devlink_ops,
+ sizeof(struct hclgevf_devlink_priv));
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+
+ ret = devlink_register(devlink, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ hdev->devlink = devlink;
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ if (!devlink)
+ return;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+
+ hdev->devlink = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
new file mode 100644
index 000000000000..e09ea3d8a963
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_DEVLINK_H
+#define __HCLGEVF_DEVLINK_H
+
+#include "hclgevf_main.h"
+
+struct hclgevf_devlink_priv {
+ struct hclgevf_dev *hdev;
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev);
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8784d61e833f..3a19f08bfff3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -8,6 +8,7 @@
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclgevf_devlink.h"
#define HCLGEVF_NAME "hclgevf"
@@ -3337,6 +3338,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_devlink_init;
+
ret = hclgevf_cmd_queue_init(hdev);
if (ret)
goto err_cmd_queue_init;
@@ -3441,6 +3446,8 @@ err_misc_irq_init:
err_cmd_init:
hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
+ hclgevf_devlink_uninit(hdev);
+err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
@@ -3462,6 +3469,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
}
hclgevf_cmd_uninit(hdev);
+ hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index d7d02848d674..6f222a3a0bf2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -6,6 +6,7 @@
#include <linux/fs.h>
#include <linux/if_vlan.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
@@ -330,6 +331,8 @@ struct hclgevf_dev {
u32 flag;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+
+ struct devlink *devlink;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 471be6ec7e8a..664a91af662d 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -3011,7 +3011,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_stop = emac_close,
.ndo_get_stats = emac_stats,
.ndo_set_rx_mode = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_eth_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
@@ -3023,7 +3023,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_stop = emac_close,
.ndo_get_stats = emac_stats,
.ndo_set_rx_mode = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_eth_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 737ba85e409f..3d9b4f99d357 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1630,7 +1630,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_stop = ibmveth_close,
.ndo_start_xmit = ibmveth_start_xmit,
.ndo_set_rx_mode = ibmveth_set_multicast_list,
- .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_eth_ioctl = ibmveth_ioctl,
.ndo_change_mtu = ibmveth_change_mtu,
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1b0958bd24f6..373eb027b925 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2715,10 +2715,10 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
+ memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test));
break;
case ETH_SS_STATS:
- memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
+ memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats));
break;
}
}
@@ -2809,7 +2809,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = e100_set_multicast_list,
.ndo_set_mac_address = e100_set_mac_address,
- .ndo_do_ioctl = e100_do_ioctl,
+ .ndo_eth_ioctl = e100_do_ioctl,
.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e100_netpoll,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index c2a109126c27..bed4f040face 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -832,7 +832,7 @@ static const struct net_device_ops e1000_netdev_ops = {
.ndo_set_mac_address = e1000_set_mac,
.ndo_tx_timeout = e1000_tx_timeout,
.ndo_change_mtu = e1000_change_mtu,
- .ndo_do_ioctl = e1000_ioctl,
+ .ndo_eth_ioctl = e1000_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 06442e6bef73..7256b43b7a65 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -903,6 +903,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
mask |= BIT(18);
break;
default:
@@ -1569,6 +1570,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index db79c4e6413e..bcf680e83811 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -98,14 +98,22 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
+#define E1000_DEV_ID_PCH_RPL_I219_LM23 0x0DC5
+#define E1000_DEV_ID_PCH_RPL_I219_V23 0x0DC6
#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
+#define E1000_DEV_ID_PCH_RPL_I219_LM22 0x0DC7
+#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
+#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
+#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
+#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511
#define E1000_REVISION_4 4
@@ -132,6 +140,7 @@ enum e1000_mac_type {
e1000_pch_tgp,
e1000_pch_adp,
e1000_pch_mtp,
+ e1000_pch_lnp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index cf7b3887da1d..2f97c9f5611d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -321,6 +321,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -466,6 +467,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -711,6 +713,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1266,9 +1269,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
usleep_range(10000, 11000);
}
if (firmware_bug)
- e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n",
+ i * 10);
else
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ e_dbg("ULP_CONFIG_DONE cleared after %d msec\n",
+ i * 10);
if (force) {
mac_reg = er32(H2ME);
@@ -1663,6 +1668,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2118,6 +2124,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3162,6 +3169,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4101,6 +4109,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 1502895eb45d..9b145f6248a8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -41,12 +41,15 @@
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
+#define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_START_DPG 0x00000001 /* indicate to ME to start DPG */
+#define E1000_H2ME_EXIT_DPG 0x00000002 /* indicate to ME to exit DPG */
#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 757a54c39eef..900b3ab998bd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3550,6 +3550,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4068,6 +4069,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -6343,42 +6345,110 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- /* Disable the periodic inband message,
- * don't request PCIe clock in K1 page770_17[10:9] = 10b
- */
- e1e_rphy(hw, HV_PM_CTRL, &phy_data);
- phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
- phy_data |= BIT(10);
- e1e_wphy(hw, HV_PM_CTRL, phy_data);
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure the device for S0ix */
+ mac_data = er32(H2ME);
+ mac_data |= E1000_H2ME_START_DPG;
+ mac_data &= ~E1000_H2ME_EXIT_DPG;
+ ew32(H2ME, mac_data);
+ } else {
+ /* Request driver configure the device to S0ix */
+ /* Disable the periodic inband message,
+ * don't request PCIe clock in K1 page770_17[10:9] = 10b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
+ phy_data |= BIT(10);
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
- /* Make sure we don't exit K1 every time a new packet arrives
- * 772_29[5] = 1 CS_Mode_Stay_In_K1
- */
- e1e_rphy(hw, I217_CGFREG, &phy_data);
- phy_data |= BIT(5);
- e1e_wphy(hw, I217_CGFREG, phy_data);
+ /* Make sure we don't exit K1 every time a new packet arrives
+ * 772_29[5] = 1 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data |= BIT(5);
+ e1e_wphy(hw, I217_CGFREG, phy_data);
- /* Change the MAC/PHY interface to SMBus
- * Force the SMBus in PHY page769_23[0] = 1
- * Force the SMBus in MAC CTRL_EXT[11] = 1
- */
- e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
- phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, phy_data);
- mac_data = er32(CTRL_EXT);
- mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_data);
+ /* Change the MAC/PHY interface to SMBus
+ * Force the SMBus in PHY page769_23[0] = 1
+ * Force the SMBus in MAC CTRL_EXT[11] = 1
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+
+ /* DFT control: PHY bit: page769_20[0] = 1
+ * page769_20[7] - PHY PLL stop
+ * page769_20[8] - PHY go to the electrical idle
+ * page769_20[9] - PHY serdes disable
+ * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
+ */
+ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
+ phy_data |= BIT(0);
+ phy_data |= BIT(7);
+ phy_data |= BIT(8);
+ phy_data |= BIT(9);
+ e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+
+ mac_data = er32(EXTCNF_CTRL);
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+ }
- /* DFT control: PHY bit: page769_20[0] = 1
- * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
- */
- e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
- phy_data |= BIT(0);
- e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+ mac_data &= ~BIT(0);
+ ew32(FEXTNVM7, mac_data);
- mac_data = er32(EXTCNF_CTRL);
- mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
- ew32(EXTCNF_CTRL, mac_data);
+ /* Dynamic Power Gating Enable */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= BIT(3);
+ ew32(CTRL_EXT, mac_data);
/* Check MAC Tx/Rx packet buffer pointers.
* Reset MAC Tx/Rx packet buffer pointers to suppress any
@@ -6414,148 +6484,130 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data = er32(RDFPC);
if (mac_data)
ew32(RDFPC, 0);
-
- /* Enable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(22);
- ew32(FEXTNVM7, mac_data);
-
- /* Disable the time synchronization clock */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(31);
- mac_data &= ~BIT(0);
- ew32(FEXTNVM7, mac_data);
-
- /* Dynamic Power Gating Enable */
- mac_data = er32(CTRL_EXT);
- mac_data |= BIT(3);
- ew32(CTRL_EXT, mac_data);
-
- /* Disable disconnected cable conditioning for Power Gating */
- mac_data = er32(DPGFR);
- mac_data |= BIT(2);
- ew32(DPGFR, mac_data);
-
- /* Don't wake from dynamic Power Gating with clock request */
- mac_data = er32(FEXTNVM12);
- mac_data |= BIT(12);
- ew32(FEXTNVM12, mac_data);
-
- /* Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Enable K1 off to enable mPHY Power Gating */
- mac_data = er32(FEXTNVM6);
- mac_data |= BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Enable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data |= BIT(9);
- ew32(FEXTNVM8, mac_data);
-
- /* Enable the Dynamic Clock Gating in the DMA and MAC */
- mac_data = er32(CTRL_EXT);
- mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
- ew32(CTRL_EXT, mac_data);
-
- /* No MAC DPG gating SLP_S0 in modern standby
- * Switch the logic of the lanphypc to use PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data |= BIT(7);
- ew32(FEXTNVM5, mac_data);
}
static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ bool firmware_bug = false;
u32 mac_data;
u16 phy_data;
+ u32 i = 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME unconfigure the device from S0ix */
+ mac_data = er32(H2ME);
+ mac_data &= ~E1000_H2ME_START_DPG;
+ mac_data |= E1000_H2ME_EXIT_DPG;
+ ew32(H2ME, mac_data);
+
+ /* Poll up to 2.5 seconds for ME to unconfigure DPG.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
+ while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) {
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
- /* Disable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data &= 0xFFBFFFFF;
- ew32(FEXTNVM7, mac_data);
+ if (i++ == 250) {
+ e_dbg("Timeout (firmware bug): %d msec\n",
+ i * 10);
+ break;
+ }
- /* Enable the time synchronization clock */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(0);
- ew32(FEXTNVM7, mac_data);
+ usleep_range(10000, 11000);
+ }
+ if (firmware_bug)
+ e_warn("DPG_EXIT_DONE took %d msec. This is a firmware bug\n",
+ i * 10);
+ else
+ e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10);
+ } else {
+ /* Request driver unconfigure the device from S0ix */
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Disable Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Cancel not waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
- /* Disable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data &= ~BIT(9);
- ew32(FEXTNVM8, mac_data);
+ /* Cancel disable disconnected cable conditioning
+ * for Power Gating
+ */
+ mac_data = er32(DPGFR);
+ mac_data &= ~BIT(2);
+ ew32(DPGFR, mac_data);
- /* Disable K1 off */
- mac_data = er32(FEXTNVM6);
- mac_data &= ~BIT(31);
- ew32(FEXTNVM6, mac_data);
+ /* Disable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
- /* Disable Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
- ew32(FEXTNVM9, mac_data);
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
- /* Cancel not waking from dynamic
- * Power Gating with clock request
- */
- mac_data = er32(FEXTNVM12);
- mac_data &= ~BIT(12);
- ew32(FEXTNVM12, mac_data);
+ /* Enable the periodic inband message,
+ * Request PCIe clock in K1 page770_17[10:9] =01b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= 0xFBFF;
+ phy_data |= HV_PM_CTRL_K1_CLK_REQ;
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
- /* Cancel disable disconnected cable conditioning
- * for Power Gating
- */
- mac_data = er32(DPGFR);
- mac_data &= ~BIT(2);
- ew32(DPGFR, mac_data);
+ /* Return back configuration
+ * 772_29[5] = 0 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data &= 0xFFDF;
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to Kumeran
+ * Unforce the SMBus in PHY page769_23[0] = 0
+ * Unforce the SMBus in MAC CTRL_EXT[11] = 0
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+ }
/* Disable Dynamic Power Gating */
mac_data = er32(CTRL_EXT);
mac_data &= 0xFFFFFFF7;
ew32(CTRL_EXT, mac_data);
- /* Disable the Dynamic Clock Gating in the DMA and MAC */
- mac_data = er32(CTRL_EXT);
- mac_data &= 0xFFF7FFFF;
- ew32(CTRL_EXT, mac_data);
-
- /* Revert the lanphypc logic to use the internal Gbe counter
- * and not the PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data &= 0xFFFFFF7F;
- ew32(FEXTNVM5, mac_data);
-
- /* Enable the periodic inband message,
- * Request PCIe clock in K1 page770_17[10:9] =01b
- */
- e1e_rphy(hw, HV_PM_CTRL, &phy_data);
- phy_data &= 0xFBFF;
- phy_data |= HV_PM_CTRL_K1_CLK_REQ;
- e1e_wphy(hw, HV_PM_CTRL, phy_data);
-
- /* Return back configuration
- * 772_29[5] = 0 CS_Mode_Stay_In_K1
- */
- e1e_rphy(hw, I217_CGFREG, &phy_data);
- phy_data &= 0xFFDF;
- e1e_wphy(hw, I217_CGFREG, phy_data);
-
- /* Change the MAC/PHY interface to Kumeran
- * Unforce the SMBus in PHY page769_23[0] = 0
- * Unforce the SMBus in MAC CTRL_EXT[11] = 0
- */
- e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
- phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, phy_data);
- mac_data = er32(CTRL_EXT);
- mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_data);
+ /* Enable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= ~BIT(31);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
@@ -7302,7 +7354,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
- .ndo_do_ioctl = e1000_ioctl,
+ .ndo_eth_ioctl = e1000_ioctl,
.ndo_tx_timeout = e1000_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -7677,7 +7729,7 @@ err_dma:
* @pdev: PCI device information struct
*
* e1000_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
@@ -7850,14 +7902,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 9e79d672f4f1..eb5c014c02fb 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -298,6 +298,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 8165ba2619a4..6c0cd8cab3ef 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -213,6 +213,7 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_EXFWSM 0x05B58 /* Extended FW Semaphore */
/* Driver-only SW semaphore (not used by BOOT agents) */
#define E1000_SWSM2 0x05B58
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b9417dc0007c..39fb3d57c057 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -428,6 +428,8 @@ struct i40e_channel {
struct i40e_vsi *parent_vsi;
};
+struct i40e_ptp_pins_settings;
+
static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
{
return !!ch->fwd;
@@ -644,12 +646,83 @@ struct i40e_pf {
struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */
struct i40e_dcbx_config tmp_cfg;
+/* GPIO defines used by PTP */
+#define I40E_SDP3_2 18
+#define I40E_SDP3_3 19
+#define I40E_GPIO_4 20
+#define I40E_LED2_0 26
+#define I40E_LED2_1 27
+#define I40E_LED3_0 28
+#define I40E_LED3_1 29
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_HI \
+ (1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRV_SDP_DATA \
+ (1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_0 \
+ (0 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_1 \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_RESERVED BIT(2)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_DIR_OUT \
+ (1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_DRV_HI \
+ (1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_HI_RST \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_0 \
+ (3 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_1 \
+ (4 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN \
+ (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_LED_INIT \
+ (I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z | \
+ I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | \
+ I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_OUT_DEFAULT | \
+ I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN)
+#define I40E_PRTTSYN_AUX_1_INSTNT \
+ (1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE \
+ (1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_CLK_MOD (3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD \
+ (I40E_PRTTSYN_AUX_0_OUT_ENABLE | I40E_PRTTSYN_AUX_0_OUT_CLK_MOD)
+#define I40E_PTP_HALF_SECOND 500000000LL /* nanoseconds */
+#define I40E_PTP_2_SEC_DELAY 2
+
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
+ struct work_struct ptp_pps_work;
+ struct work_struct ptp_extts0_work;
+ struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
@@ -657,10 +730,14 @@ struct i40e_pf {
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
+ u64 ptp_pps_start;
+ u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
+ struct i40e_ptp_pins_settings *ptp_pins;
u16 rss_table_size; /* HW RSS table size */
u32 max_bw;
u32 min_bw;
@@ -1169,6 +1246,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1d1f52756a93..97c78551395b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4079,10 +4079,13 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
- icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
+ schedule_work(&pf->ptp_extts0_work);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
i40e_ptp_tx_hwtstamp(pf);
- }
+
+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
}
/* If a critical error is pending we have no choice but to reset the
@@ -13265,7 +13268,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_do_ioctl = i40e_ioctl,
+ .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -15181,6 +15184,22 @@ err_switch_setup:
}
/**
+ * i40e_set_subsystem_device_id - set subsystem device id
+ * @hw: pointer to the hardware info
+ *
+ * Set PCI subsystem device id either from a pci_dev structure or
+ * a specific FW register.
+ **/
+static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
+{
+ struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
+
+ hw->subsystem_device_id = pdev->subsystem_device ?
+ pdev->subsystem_device :
+ (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -15275,7 +15294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->device_id = pdev->device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
+ i40e_set_subsystem_device_id(hw);
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
@@ -15455,6 +15474,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_valid_ether_addr(hw->mac.port_addr))
pf->hw_features |= I40E_HW_PORT_ID_VALID;
+ i40e_ptp_alloc_pins(pf);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 7b971b205d36..09b1d5aed1c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -3,6 +3,7 @@
#include "i40e.h"
#include <linux/ptp_classify.h>
+#include <linux/posix-clock.h>
/* The XL710 timesync is very much like Intel's 82599 design when it comes to
* the fundamental clock design. However, the clock operations are much simpler
@@ -20,10 +21,252 @@
#define I40E_PTP_10GB_INCVAL_MULT 2
#define I40E_PTP_5GB_INCVAL_MULT 2
#define I40E_PTP_1GB_INCVAL_MULT 20
+#define I40E_ISGN 0x80000000
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+enum i40e_ptp_pin {
+ SDP3_2 = 0,
+ SDP3_3,
+ GPIO_4
+};
+
+enum i40e_can_set_pins_t {
+ CANT_DO_PINS = -1,
+ CAN_SET_PINS,
+ CAN_DO_PINS
+};
+
+static struct ptp_pin_desc sdp_desc[] = {
+ /* name idx func chan */
+ {"SDP3_2", SDP3_2, PTP_PF_NONE, 0},
+ {"SDP3_3", SDP3_3, PTP_PF_NONE, 1},
+ {"GPIO_4", GPIO_4, PTP_PF_NONE, 1},
+};
+
+enum i40e_ptp_gpio_pin_state {
+ end = -2,
+ invalid,
+ off,
+ in_A,
+ in_B,
+ out_A,
+ out_B,
+};
+
+static const char * const i40e_ptp_gpio_pin_state2str[] = {
+ "off", "in_A", "in_B", "out_A", "out_B"
+};
+
+enum i40e_ptp_led_pin_state {
+ led_end = -2,
+ low = 0,
+ high,
+};
+
+struct i40e_ptp_pins_settings {
+ enum i40e_ptp_gpio_pin_state sdp3_2;
+ enum i40e_ptp_gpio_pin_state sdp3_3;
+ enum i40e_ptp_gpio_pin_state gpio_4;
+ enum i40e_ptp_led_pin_state led2_0;
+ enum i40e_ptp_led_pin_state led2_1;
+ enum i40e_ptp_led_pin_state led3_0;
+ enum i40e_ptp_led_pin_state led3_1;
+};
+
+static const struct i40e_ptp_pins_settings
+ i40e_ptp_pin_led_allowed_states[] = {
+ {off, off, off, high, high, high, high},
+ {off, in_A, off, high, high, high, low},
+ {off, out_A, off, high, low, high, high},
+ {off, in_B, off, high, high, high, low},
+ {off, out_B, off, high, low, high, high},
+ {in_A, off, off, high, high, high, low},
+ {in_A, in_B, off, high, high, high, low},
+ {in_A, out_B, off, high, low, high, high},
+ {out_A, off, off, high, low, high, high},
+ {out_A, in_B, off, high, low, high, high},
+ {in_B, off, off, high, high, high, low},
+ {in_B, in_A, off, high, high, high, low},
+ {in_B, out_A, off, high, low, high, high},
+ {out_B, off, off, high, low, high, high},
+ {out_B, in_A, off, high, low, high, high},
+ {off, off, in_A, high, high, low, high},
+ {off, out_A, in_A, high, low, low, high},
+ {off, in_B, in_A, high, high, low, low},
+ {off, out_B, in_A, high, low, low, high},
+ {out_A, off, in_A, high, low, low, high},
+ {out_A, in_B, in_A, high, low, low, high},
+ {in_B, off, in_A, high, high, low, low},
+ {in_B, out_A, in_A, high, low, low, high},
+ {out_B, off, in_A, high, low, low, high},
+ {off, off, out_A, low, high, high, high},
+ {off, in_A, out_A, low, high, high, low},
+ {off, in_B, out_A, low, high, high, low},
+ {off, out_B, out_A, low, low, high, high},
+ {in_A, off, out_A, low, high, high, low},
+ {in_A, in_B, out_A, low, high, high, low},
+ {in_A, out_B, out_A, low, low, high, high},
+ {in_B, off, out_A, low, high, high, low},
+ {in_B, in_A, out_A, low, high, high, low},
+ {out_B, off, out_A, low, low, high, high},
+ {out_B, in_A, out_A, low, low, high, high},
+ {off, off, in_B, high, high, low, high},
+ {off, in_A, in_B, high, high, low, low},
+ {off, out_A, in_B, high, low, low, high},
+ {off, out_B, in_B, high, low, low, high},
+ {in_A, off, in_B, high, high, low, low},
+ {in_A, out_B, in_B, high, low, low, high},
+ {out_A, off, in_B, high, low, low, high},
+ {out_B, off, in_B, high, low, low, high},
+ {out_B, in_A, in_B, high, low, low, high},
+ {off, off, out_B, low, high, high, high},
+ {off, in_A, out_B, low, high, high, low},
+ {off, out_A, out_B, low, low, high, high},
+ {off, in_B, out_B, low, high, high, low},
+ {in_A, off, out_B, low, high, high, low},
+ {in_A, in_B, out_B, low, high, high, low},
+ {out_A, off, out_B, low, low, high, high},
+ {out_A, in_B, out_B, low, low, high, high},
+ {in_B, off, out_B, low, high, high, low},
+ {in_B, in_A, out_B, low, high, high, low},
+ {in_B, out_A, out_B, low, low, high, high},
+ {end, end, end, led_end, led_end, led_end, led_end}
+};
+
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins);
+
+/**
+ * i40e_ptp_extts0_work - workqueue task function
+ * @work: workqueue task structure
+ *
+ * Service for PTP external clock event
+ **/
+static void i40e_ptp_extts0_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_extts0_work);
+ struct i40e_hw *hw = &pf->hw;
+ struct ptp_clock_event event;
+ u32 hi, lo;
+
+ /* Event time is captured by one of the two matched registers
+ * PRTTSYN_EVNT_L: 32 LSB of sampled time event
+ * PRTTSYN_EVNT_H: 32 MSB of sampled time event
+ * Event is defined in PRTTSYN_EVNT_0 register
+ */
+ lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0));
+ hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0));
+
+ event.timestamp = (((u64)hi) << 32) | lo;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = hw->pf_id;
+
+ /* fire event */
+ ptp_clock_event(pf->ptp_clock, &event);
+}
+
+/**
+ * i40e_is_ptp_pin_dev - check if device supports PTP pins
+ * @hw: pointer to the hardware structure
+ *
+ * Return true if device supports PTP pins, false otherwise.
+ **/
+static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
+{
+ return hw->device_id == I40E_DEV_ID_25G_SFP28 &&
+ hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN;
+}
+
+/**
+ * i40e_can_set_pins - check possibility of manipulating the pins
+ * @pf: board private structure
+ *
+ * Check if all conditions are satisfied to manipulate PTP pins.
+ * Return CAN_SET_PINS if pins can be set on a specific PF or
+ * return CAN_DO_PINS if pins can be manipulated within a NIC or
+ * return CANT_DO_PINS otherwise.
+ **/
+static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+ dev_warn(&pf->pdev->dev,
+ "PTP external clock not supported.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PIN manipulation not allowed.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (pf->hw.pf_id) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PINs should be accessed via PF0.\n");
+ return CAN_DO_PINS;
+ }
+
+ return CAN_SET_PINS;
+}
+
+/**
+ * i40_ptp_reset_timing_events - Reset PTP timing events
+ * @pf: Board private structure
+ *
+ * This function resets timing events for pf.
+ **/
+static void i40_ptp_reset_timing_events(struct i40e_pf *pf)
+{
+ u32 i;
+
+ spin_lock_bh(&pf->ptp_rx_lock);
+ for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) {
+ /* reading and automatically clearing timing events registers */
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i));
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_events[i] = 0;
+ }
+ /* reading and automatically clearing timing events registers */
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L);
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H);
+
+ pf->tx_hwtstamp_timeouts = 0;
+ pf->tx_hwtstamp_skipped = 0;
+ pf->rx_hwtstamp_cleared = 0;
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
+}
+
+/**
+ * i40e_ptp_verify - check pins
+ * @ptp: ptp clock
+ * @pin: pin index
+ * @func: assigned function
+ * @chan: channel
+ *
+ * Check pins consistency.
+ * Return 0 on success or error on failure.
+ **/
+static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
/**
* i40e_ptp_read - Read the PHC time from the device
@@ -137,6 +380,37 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
/**
+ * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins
+ * @pf: the PF private data structure
+ *
+ * Configure 1PPS signal used for PTP pins
+ **/
+static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct timespec64 now;
+ u64 ns;
+
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), 0);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE);
+
+ i40e_ptp_read(pf, &now, NULL);
+ now.tv_sec += I40E_PTP_2_SEC_DELAY;
+ now.tv_nsec = 0;
+ ns = timespec64_to_ns(&now);
+
+ /* I40E_PRTTSYN_TGT_L(1) */
+ wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF);
+ /* I40E_PRTTSYN_TGT_H(1) */
+ wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32);
+ wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1),
+ I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD);
+}
+
+/**
* i40e_ptp_adjtime - Adjust the PHC time
* @ptp: The PTP clock structure
* @delta: Offset in nanoseconds to adjust the PHC time by
@@ -146,14 +420,35 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then;
+ struct i40e_hw *hw = &pf->hw;
- then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, &now, NULL);
- now = timespec64_add(now, then);
- i40e_ptp_write(pf, (const struct timespec64 *)&now);
+ if (delta > -999999900LL && delta < 999999900LL) {
+ int neg_adj = 0;
+ u32 timadj;
+ u64 tohw;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ tohw = -delta;
+ } else {
+ tohw = delta;
+ }
+
+ timadj = tohw & 0x3FFFFFFF;
+ if (neg_adj)
+ timadj |= I40E_ISGN;
+ wr32(hw, I40E_PRTTSYN_ADJ, timadj);
+ } else {
+ struct timespec64 then, now;
+
+ then = ns_to_timespec64(delta);
+ i40e_ptp_read(pf, &now, NULL);
+ now = timespec64_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
+ i40e_ptp_set_1pps_signal_hw(pf);
+ }
mutex_unlock(&pf->tmreg_lock);
@@ -184,7 +479,7 @@ static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
/**
* i40e_ptp_settime - Set the time of the PHC
* @ptp: The PTP clock structure
- * @ts: timespec structure that holds the new time value
+ * @ts: timespec64 structure that holds the new time value
*
* Set the device clock to the user input value. The conversion from timespec
* to ns happens in the write function.
@@ -202,18 +497,145 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
+ * i40e_pps_configure - configure PPS events
+ * @ptp: ptp clock
+ * @rq: clock request
+ * @on: status
+ *
+ * Configure PPS events for external clock source.
+ * Return 0 on success or error on failure.
+ **/
+static int i40e_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ if (!!on)
+ i40e_ptp_set_1pps_signal_hw(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_pin_state - determine PIN state
+ * @index: PIN index
+ * @func: function assigned to PIN
+ *
+ * Determine PIN state based on PIN index and function assigned.
+ * Return PIN state.
+ **/
+static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func)
+{
+ enum i40e_ptp_gpio_pin_state state = off;
+
+ if (index == 0 && func == PTP_PF_EXTTS)
+ state = in_A;
+ if (index == 1 && func == PTP_PF_EXTTS)
+ state = in_B;
+ if (index == 0 && func == PTP_PF_PEROUT)
+ state = out_A;
+ if (index == 1 && func == PTP_PF_PEROUT)
+ state = out_B;
+
+ return state;
+}
+
+/**
+ * i40e_ptp_enable_pin - enable PINs.
+ * @pf: private board structure
+ * @chan: channel
+ * @func: PIN function
+ * @on: state
+ *
+ * Enable PTP pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
+ enum ptp_pin_function func, int on)
+{
+ enum i40e_ptp_gpio_pin_state *pin = NULL;
+ struct i40e_ptp_pins_settings pins;
+ int pin_index;
+
+ /* Use PF0 to set pins. Return success for user space tools */
+ if (pf->hw.pf_id)
+ return 0;
+
+ /* Preserve previous state of pins that we don't touch */
+ pins.sdp3_2 = pf->ptp_pins->sdp3_2;
+ pins.sdp3_3 = pf->ptp_pins->sdp3_3;
+ pins.gpio_4 = pf->ptp_pins->gpio_4;
+
+ /* To turn on the pin - find the corresponding one based on
+ * the given index. To turn the function off - find
+ * which pin had it assigned. Don't use ptp_find_pin here
+ * because it tries to lock the pincfg_mux which is locked by
+ * ptp_pin_store() that calls here.
+ */
+ if (on) {
+ pin_index = ptp_find_pin(pf->ptp_clock, func, chan);
+ if (pin_index < 0)
+ return -EBUSY;
+
+ switch (pin_index) {
+ case SDP3_2:
+ pin = &pins.sdp3_2;
+ break;
+ case SDP3_3:
+ pin = &pins.sdp3_3;
+ break;
+ case GPIO_4:
+ pin = &pins.gpio_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *pin = i40e_pin_state(chan, func);
+ } else {
+ pins.sdp3_2 = off;
+ pins.sdp3_3 = off;
+ pins.gpio_4 = off;
+ }
+
+ return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0;
+}
+
+/**
+ * i40e_ptp_feature_enable - Enable external clock pins
* @ptp: The PTP clock structure
- * @rq: The requested feature to change
- * @on: Enable/disable flag
+ * @rq: The PTP clock request structure
+ * @on: To turn feature on/off
*
- * The XL710 does not support any of the ancillary features of the PHC
- * subsystem, so this function may just return.
+ * Setting on/off PTP PPS feature for pin.
**/
static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+ struct ptp_clock_request *rq,
+ int on)
{
- return -EOPNOTSUPP;
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ enum ptp_pin_function func;
+ unsigned int chan;
+
+ /* TODO: Implement flags handling for EXTTS and PEROUT */
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ func = PTP_PF_EXTTS;
+ chan = rq->extts.index;
+ break;
+ case PTP_CLK_REQ_PEROUT:
+ func = PTP_PF_PEROUT;
+ chan = rq->perout.index;
+ break;
+ case PTP_CLK_REQ_PPS:
+ return i40e_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return i40e_ptp_enable_pin(pf, chan, func, on);
}
/**
@@ -528,6 +950,229 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
}
/**
+ * i40e_ptp_free_pins - free memory used by PTP pins
+ * @pf: Board private structure
+ *
+ * Release memory allocated for PTP pins.
+ **/
+static void i40e_ptp_free_pins(struct i40e_pf *pf)
+{
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ kfree(pf->ptp_pins);
+ kfree(pf->ptp_caps.pin_config);
+ pf->ptp_pins = NULL;
+ }
+}
+
+/**
+ * i40e_ptp_set_pin_hw - Set HW GPIO pin
+ * @hw: pointer to the hardware structure
+ * @pin: pin index
+ * @state: pin state
+ *
+ * Set status of GPIO pin for external clock handling.
+ **/
+static void i40e_ptp_set_pin_hw(struct i40e_hw *hw,
+ unsigned int pin,
+ enum i40e_ptp_gpio_pin_state state)
+{
+ switch (state) {
+ case off:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0);
+ break;
+ case in_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0);
+ break;
+ case in_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0);
+ break;
+ case out_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1);
+ break;
+ case out_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_set_led_hw - Set HW GPIO led
+ * @hw: pointer to the hardware structure
+ * @led: led index
+ * @state: led state
+ *
+ * Set status of GPIO led for external clock handling.
+ **/
+static void i40e_ptp_set_led_hw(struct i40e_hw *hw,
+ unsigned int led,
+ enum i40e_ptp_led_pin_state state)
+{
+ switch (state) {
+ case low:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led);
+ break;
+ case high:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA |
+ I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_init_leds_hw - init LEDs
+ * @hw: pointer to a hardware structure
+ *
+ * Set initial state of LEDs
+ **/
+static void i40e_ptp_init_leds_hw(struct i40e_hw *hw)
+{
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+}
+
+/**
+ * i40e_ptp_set_pins_hw - Set HW GPIO pins
+ * @pf: Board private structure
+ *
+ * This function sets GPIO pins for PTP
+ **/
+static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+{
+ const struct i40e_ptp_pins_settings *pins = pf->ptp_pins;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* pin must be disabled before it may be used */
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4);
+
+ i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1);
+
+ dev_info(&pf->pdev->dev,
+ "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+}
+
+/**
+ * i40e_ptp_set_pins - set PTP pins in HW
+ * @pf: Board private structure
+ * @pins: PTP pins to be applied
+ *
+ * Validate and set PTP pins in HW for specific PF.
+ * Return 0 on success or negative value on error.
+ **/
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins)
+{
+ enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ int i = 0;
+
+ if (pin_caps == CANT_DO_PINS)
+ return -EOPNOTSUPP;
+ else if (pin_caps == CAN_DO_PINS)
+ return 0;
+
+ if (pins->sdp3_2 == invalid)
+ pins->sdp3_2 = pf->ptp_pins->sdp3_2;
+ if (pins->sdp3_3 == invalid)
+ pins->sdp3_3 = pf->ptp_pins->sdp3_3;
+ if (pins->gpio_4 == invalid)
+ pins->gpio_4 = pf->ptp_pins->gpio_4;
+ while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) {
+ if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 &&
+ pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 &&
+ pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) {
+ pins->led2_0 =
+ i40e_ptp_pin_led_allowed_states[i].led2_0;
+ pins->led2_1 =
+ i40e_ptp_pin_led_allowed_states[i].led2_1;
+ pins->led3_0 =
+ i40e_ptp_pin_led_allowed_states[i].led3_0;
+ pins->led3_1 =
+ i40e_ptp_pin_led_allowed_states[i].led3_1;
+ break;
+ }
+ i++;
+ }
+ if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) {
+ dev_warn(&pf->pdev->dev,
+ "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+
+ return -EPERM;
+ }
+ memcpy(pf->ptp_pins, pins, sizeof(*pins));
+ i40e_ptp_set_pins_hw(pf);
+ i40e_ptp_reset_timing_events(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_alloc_pins - allocate PTP pins structure
+ * @pf: Board private structure
+ *
+ * Allocate and initialize the PTP pins structure; on PF0 also program
+ * the default pin and LED state in HW.
+ **/
+int i40e_ptp_alloc_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw))
+ return 0;
+
+ pf->ptp_pins =
+ kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL);
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
+ return -I40E_ERR_NO_MEMORY;
+ }
+
+ pf->ptp_pins->sdp3_2 = off;
+ pf->ptp_pins->sdp3_3 = off;
+ pf->ptp_pins->gpio_4 = off;
+ pf->ptp_pins->led2_0 = high;
+ pf->ptp_pins->led2_1 = high;
+ pf->ptp_pins->led3_0 = high;
+ pf->ptp_pins->led3_1 = high;
+
+ /* Only PF0 programs the pins in HW; other PFs return success so user
+ * space tools keep working.
+ */
+ if (pf->hw.pf_id)
+ return 0;
+
+ i40e_ptp_init_leds_hw(&pf->hw);
+ i40e_ptp_set_pins_hw(pf);
+
+ return 0;
+}
+
+/**
* i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
@@ -545,6 +1190,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
+ /* Selects external trigger to cause event */
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ /* Bits 17:16 are EVNTLVL; 01b selects a rising-edge event */
+ regval &= 0;
+ regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT);
+ /* regval: 0001 0000 0000 0000 0000 */
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Enable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
+
/* Reserved for future extensions. */
if (config->flags)
return -EINVAL;
@@ -688,6 +1348,45 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
}
/**
+ * i40e_init_pin_config - initialize pins.
+ * @pf: private board structure
+ *
+ * Initialize pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_init_pin_config(struct i40e_pf *pf)
+{
+ int i;
+
+ pf->ptp_caps.n_pins = 3;
+ pf->ptp_caps.n_ext_ts = 2;
+ pf->ptp_caps.pps = 1;
+ pf->ptp_caps.n_per_out = 2;
+
+ pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins,
+ sizeof(*pf->ptp_caps.pin_config),
+ GFP_KERNEL);
+ if (!pf->ptp_caps.pin_config)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->ptp_caps.n_pins; i++) {
+ snprintf(pf->ptp_caps.pin_config[i].name,
+ sizeof(pf->ptp_caps.pin_config[i].name),
+ "%s", sdp_desc[i].name);
+ pf->ptp_caps.pin_config[i].index = sdp_desc[i].index;
+ pf->ptp_caps.pin_config[i].func = PTP_PF_NONE;
+ pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan;
+ }
+
+ pf->ptp_caps.verify = i40e_ptp_verify;
+ pf->ptp_caps.enable = i40e_ptp_feature_enable;
+
+ return 0;
+}
+
+/**
* i40e_ptp_create_clock - Create PTP clock device for userspace
* @pf: Board private structure
*
@@ -707,13 +1406,16 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.n_ext_ts = 0;
- pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
- pf->ptp_caps.enable = i40e_ptp_feature_enable;
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ int err = i40e_init_pin_config(pf);
+
+ if (err)
+ return err;
+ }
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
@@ -843,6 +1545,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* Restore the clock time based on last known value */
i40e_ptp_restore_hw_time(pf);
}
+
+ i40e_ptp_set_1pps_signal_hw(pf);
}
/**
@@ -854,6 +1558,9 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_hw *hw = &pf->hw;
+ u32 regval;
+
pf->flags &= ~I40E_FLAG_PTP;
pf->ptp_tx = false;
pf->ptp_rx = false;
@@ -872,4 +1579,21 @@ void i40e_ptp_stop(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
pf->vsi[pf->lan_vsi]->netdev->name);
}
+
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+ }
+
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK;
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Disable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ i40e_ptp_free_pins(pf);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 36f7b27a04ae..8d0588a27a05 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -182,11 +182,20 @@
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
@@ -540,6 +549,7 @@
#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
@@ -742,6 +752,8 @@
#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
@@ -760,7 +772,10 @@
#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
@@ -768,6 +783,20 @@
#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index eff0a30790dd..472f56b360b8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
}
/**
- * i40e_getnum_vf_vsi_vlan_filters
+ * __i40e_getnum_vf_vsi_vlan_filters
* @vsi: pointer to the vsi
*
* called to get the number of VLANs offloaded on this VF
**/
-static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
u16 num_vlans = 0, bkt;
@@ -1179,6 +1179,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ int num_vlans;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return num_vlans;
+}
+
+/**
* i40e_get_vlan_list_sync
* @vsi: pointer to the VSI
* @num_vlans: number of VLANs in mac_filter_hash, returned to caller
@@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
int bkt;
spin_lock_bh(&vsi->mac_filter_hash_lock);
- *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
if (!(*vlan_list))
goto err;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 44bafedd09f2..fa6cf20da911 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -132,6 +132,30 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
}
/**
+ * iavf_lock_timeout - try to set bit but give up after timeout
+ * @adapter: board private structure
+ * @bit: bit to set
+ * @msecs: timeout in msecs
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int iavf_lock_timeout(struct iavf_adapter *adapter,
+ enum iavf_critical_section_t bit,
+ unsigned int msecs)
+{
+ unsigned int wait, delay = 10;
+
+ for (wait = 0; wait < msecs; wait += delay) {
+ if (!test_and_set_bit(bit, &adapter->crit_section))
+ return 0;
+
+ msleep(delay);
+ }
+
+ return -1;
+}
+
+/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
**/
@@ -1988,7 +2012,6 @@ static void iavf_watchdog_task(struct work_struct *work)
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- adapter->state = __IAVF_RESETTING;
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2102,6 +2125,10 @@ static void iavf_reset_task(struct work_struct *work)
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
+ schedule_work(&adapter->reset_task);
+ return;
+ }
while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
@@ -2308,6 +2335,8 @@ static void iavf_adminq_task(struct work_struct *work)
if (!event.msg_buf)
goto out;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
+ goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2321,6 +2350,7 @@ static void iavf_adminq_task(struct work_struct *work)
if (pending != 0)
memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
} while (pending);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
@@ -3625,6 +3655,10 @@ static void iavf_init_task(struct work_struct *work)
init_task.work);
struct iavf_hw *hw = &adapter->hw;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
+ return;
+ }
switch (adapter->state) {
case __IAVF_STARTUP:
if (iavf_startup(adapter) < 0)
@@ -3637,14 +3671,14 @@ static void iavf_init_task(struct work_struct *work)
case __IAVF_INIT_GET_RESOURCES:
if (iavf_init_get_resources(adapter) < 0)
goto init_failed;
- return;
+ goto out;
default:
goto init_failed;
}
queue_delayed_work(iavf_wq, &adapter->init_task,
msecs_to_jiffies(30));
- return;
+ goto out;
init_failed:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
@@ -3653,9 +3687,11 @@ init_failed:
iavf_shutdown_adminq(hw);
adapter->state = __IAVF_STARTUP;
queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- return;
+ goto out;
}
queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
+out:
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -3672,9 +3708,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
iavf_close(netdev);
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
/* Prevent the watchdog from running. */
adapter->state = __IAVF_REMOVE;
adapter->aq_required = 0;
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -3908,10 +3947,6 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
- /* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
- adapter->aq_required = 0;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3919,6 +3954,13 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
+
+ /* Shut down all the garbage mashers on the detention level */
+ adapter->state = __IAVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_misc_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ef8d1815af56..33916ed9e874 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6558,12 +6558,12 @@ event_after:
}
/**
- * ice_do_ioctl - Access the hwtstamp interface
+ * ice_eth_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
* @ifr: interface request data
* @cmd: ioctl command
*/
-static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
@@ -7229,7 +7229,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_do_ioctl = ice_do_ioctl,
+ .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index e63ee3cca5ea..1277c5c7d099 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -492,6 +492,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
**/
static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
{
+ int failed_cnt = 3;
bool is_failed;
int i;
@@ -502,9 +503,12 @@ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
is_failed = true;
array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
wrfl();
- break;
}
}
+ if (is_failed && --failed_cnt <= 0) {
+ hw_dbg("Failed to update MTA_REGISTER, too many retries");
+ break;
+ }
} while (is_failed);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 636a1b1fb7e1..17f5c003c3df 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2343,8 +2343,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *igb_gstrings_test,
- IGB_TEST_LEN*ETH_GSTRING_LEN);
+ memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 171a7a629b20..751de06019a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2991,7 +2991,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
.ndo_change_mtu = igb_change_mtu,
- .ndo_do_ioctl = igb_ioctl,
+ .ndo_eth_ioctl = igb_ioctl,
.ndo_tx_timeout = igb_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 1bbe9862a758..d32e72d953c8 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2657,7 +2657,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_set_rx_mode = igbvf_set_rx_mode,
.ndo_set_mac_address = igbvf_set_mac,
.ndo_change_mtu = igbvf_change_mtu,
- .ndo_do_ioctl = igbvf_ioctl,
+ .ndo_eth_ioctl = igbvf_ioctl,
.ndo_tx_timeout = igbvf_tx_timeout,
.ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 5901ed9fb545..a0ecfe5a4078 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -33,6 +33,8 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_N_PEROUT 2
#define IGC_N_SDP 4
+#define MAX_FLEX_FILTER 32
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -476,18 +478,28 @@ struct igc_q_vector {
};
enum igc_filter_match_flags {
- IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
- IGC_FILTER_FLAG_VLAN_TCI = 0x2,
- IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
- IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+ IGC_FILTER_FLAG_ETHER_TYPE = BIT(0),
+ IGC_FILTER_FLAG_VLAN_TCI = BIT(1),
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2),
+ IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
+ IGC_FILTER_FLAG_USER_DATA = BIT(4),
+ IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
};
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
+ __be16 vlan_etype;
u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
+ u8 user_data[8];
+ u8 user_mask[8];
+ u8 flex_index;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
};
struct igc_nfc_rule {
@@ -495,12 +507,24 @@ struct igc_nfc_rule {
struct igc_nfc_filter filter;
u32 location;
u16 action;
+ bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority
- * based, and 8 ethertype based.
+/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
+ * based, 8 ethertype based and 32 Flex filter based rules.
*/
-#define IGC_MAX_RXNFC_RULES 32
+#define IGC_MAX_RXNFC_RULES 64
+
+struct igc_flex_filter {
+ u8 index;
+ u8 data[128];
+ u8 mask[16];
+ u8 length;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
+};
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index d0700d48ecf9..84f142f5e472 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I225_I_PHY_ID:
- phy->type = igc_phy_i225;
- break;
- default:
- ret_val = -IGC_ERR_PHY;
- goto out;
- }
+ phy->type = igc_phy_i225;
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c3a5a5518790..c6315690e20f 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -17,11 +17,22 @@
#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
/* Wake Up Filter Control */
-#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */
+#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */
+#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */
+#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */
+#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */
+#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */
+#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */
+#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */
+#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */
+
+#define IGC_WUFC_FILTER_MASK GENMASK(23, 14)
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -46,6 +57,37 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
+/* Wakeup Filter Control Extended */
+#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */
+#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */
+#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */
+#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */
+#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */
+#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */
+#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */
+#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */
+#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */
+#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */
+#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */
+#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */
+#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */
+#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */
+#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */
+#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */
+#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */
+#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */
+#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */
+#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */
+#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */
+#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */
+#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */
+#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */
+
+#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
+
+/* Physical Func Reset Done Indication */
+#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index fa4171860623..d3e84416248e 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -979,6 +979,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) {
+ fsp->flow_type |= FLOW_EXT;
+ memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data));
+ }
+
mutex_unlock(&adapter->nfc_rule_lock);
return 0;
@@ -1215,6 +1221,30 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
ether_addr_copy(rule->filter.dst_addr,
fsp->h_u.ether_spec.h_dest);
}
+
+ /* VLAN etype matching */
+ if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
+ rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
+ }
+
+ /* Check for user defined data */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ (fsp->h_ext.data[0] || fsp->h_ext.data[1])) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA;
+ memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data));
+ memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
+ }
+
+ /* Use a flex filter when user data or a VLAN etype is matched, or when
+ * more than one match option is set.
+ */
+ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
+ (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ (rule->filter.match_flags & (rule->filter.match_flags - 1)))
+ rule->flex = true;
+ else
+ rule->flex = false;
}
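In the test above, match_flags & (match_flags - 1) clears the lowest set bit, so the expression is non-zero exactly when more than one match flag is set. Worked example: a rule matching both a VLAN TCI (BIT(1) = 0x02) and a destination MAC (BIT(3) = 0x08) has match_flags = 0x0a, and 0x0a & 0x09 = 0x08 is non-zero, so the rule is marked for a flex filter; a rule matching only an ethertype (0x01) gives 0x01 & 0x00 = 0 and keeps using the dedicated etype filter.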
/**
@@ -1244,11 +1274,6 @@ static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
- if (flags & (flags - 1)) {
- netdev_dbg(dev, "Rule with multiple matches not supported\n");
- return -EOPNOTSUPP;
- }
-
list_for_each_entry(tmp, &adapter->nfc_rule_list, list) {
if (!memcmp(&rule->filter, &tmp->filter,
sizeof(rule->filter)) &&
@@ -1280,12 +1305,6 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EOPNOTSUPP;
}
- if ((fsp->flow_type & FLOW_EXT) &&
- fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
- netdev_dbg(netdev, "VLAN mask not supported\n");
- return -EOPNOTSUPP;
- }
-
if (fsp->ring_cookie >= adapter->num_rx_queues) {
netdev_dbg(netdev, "Invalid action\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e29aadbc6744..b7aab35c1132 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3075,11 +3075,320 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
etype);
}
+static int igc_flex_filter_select(struct igc_adapter *adapter,
+ struct igc_flex_filter *input,
+ u32 *fhft)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 fhft_index;
+ u32 fhftsl;
+
+ if (input->index >= MAX_FLEX_FILTER) {
+ dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ return -EINVAL;
+ }
+
+ /* Indirect table select register */
+ fhftsl = rd32(IGC_FHFTSL);
+ fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
+ switch (input->index) {
+ case 0 ... 7:
+ fhftsl |= 0x00;
+ break;
+ case 8 ... 15:
+ fhftsl |= 0x01;
+ break;
+ case 16 ... 23:
+ fhftsl |= 0x02;
+ break;
+ case 24 ... 31:
+ fhftsl |= 0x03;
+ break;
+ }
+ wr32(IGC_FHFTSL, fhftsl);
+
+ /* Normalize index down to host table register */
+ fhft_index = input->index % 8;
+
+ *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
+ IGC_FHFT_EXT(fhft_index - 4);
+
+ return 0;
+}
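Worked example of the mapping above: the 32 flex filters are grouped eight at a time behind the FHFTSL indirect-select register, and within a group the first four are reached through IGC_FHFT(0..3) and the last four through IGC_FHFT_EXT(0..3). For input->index = 19, FHFTSL.FTSL is programmed to 0x02 (filters 16-23), fhft_index = 19 % 8 = 3, and the filter is written through IGC_FHFT(3); index 21 in the same group would instead go through IGC_FHFT_EXT(1).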
+
+static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
+ struct igc_flex_filter *input)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct igc_hw *hw = &adapter->hw;
+ u8 *data = input->data;
+ u8 *mask = input->mask;
+ u32 queuing;
+ u32 fhft;
+ u32 wufc;
+ int ret;
+ int i;
+
+ /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
+ * out early to avoid surprises later.
+ */
+ if (input->length % 8 != 0) {
+ dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ /* Select corresponding flex filter register and get base for host table. */
+ ret = igc_flex_filter_select(adapter, input, &fhft);
+ if (ret)
+ return ret;
+
+ /* Globally disable the flex filter feature while adding a filter, as
+ * recommended by the datasheet.
+ */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+
+ /* Configure filter */
+ queuing = input->length & IGC_FHFT_LENGTH_MASK;
+ queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
+ queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+
+ if (input->immediate_irq)
+ queuing |= IGC_FHFT_IMM_INT;
+
+ if (input->drop)
+ queuing |= IGC_FHFT_DROP;
+
+ wr32(fhft + 0xFC, queuing);
+
+ /* Write data (128 bytes) and mask (128 bits) */
+ for (i = 0; i < 16; ++i) {
+ const size_t data_idx = i * 8;
+ const size_t row_idx = i * 16;
+ u32 dw0 =
+ (data[data_idx + 0] << 0) |
+ (data[data_idx + 1] << 8) |
+ (data[data_idx + 2] << 16) |
+ (data[data_idx + 3] << 24);
+ u32 dw1 =
+ (data[data_idx + 4] << 0) |
+ (data[data_idx + 5] << 8) |
+ (data[data_idx + 6] << 16) |
+ (data[data_idx + 7] << 24);
+ u32 tmp;
+
+ /* Write row: dw0, dw1 and mask */
+ wr32(fhft + row_idx, dw0);
+ wr32(fhft + row_idx + 4, dw1);
+
+ /* The per-row mask byte only occupies bits 7:0 of this dword */
+ tmp = rd32(fhft + row_idx + 8);
+ tmp &= ~GENMASK(7, 0);
+ tmp |= mask[i];
+ wr32(fhft + row_idx + 8, tmp);
+ }
+
+ /* Enable filter. */
+ wufc |= IGC_WUFC_FLEX_HQ;
+ if (input->index >= 8) {
+ /* Filters 0-7 are enabled via WUFC; filters 8-31 via WUFC_EXT. */
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
+
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc |= (IGC_WUFC_FLX0 << input->index);
+ }
+ wr32(IGC_WUFC, wufc);
+
+ dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
+ input->index);
+
+ return 0;
+}
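To make the register layout in the loop above concrete: the 128-byte pattern and the 16-byte mask are written as sixteen 16-byte rows relative to the selected base. Row i carries pattern bytes 8*i..8*i+7 in two dwords at offsets 16*i and 16*i + 4, and the per-row mask byte mask[i] in bits 7:0 of the dword at offset 16*i + 8; the queuing word (length, queue, priority, IMM_INT, DROP) lands at offset 0xFC. Pattern byte 10, for instance, is the third byte of dw0 in row 1 (written at base + 0x10), and its corresponding mask bit is bit 2 of the byte at base + 0x18.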
+
+static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
+ const void *src, unsigned int offset,
+ size_t len, const void *mask)
+{
+ int i;
+
+ /* data */
+ memcpy(&flex->data[offset], src, len);
+
+ /* mask */
+ for (i = 0; i < len; ++i) {
+ const unsigned int idx = i + offset;
+ const u8 *ptr = mask;
+
+ if (mask) {
+ if (ptr[i] & 0xff)
+ flex->mask[idx / 8] |= BIT(idx % 8);
+
+ continue;
+ }
+
+ flex->mask[idx / 8] |= BIT(idx % 8);
+ }
+}
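The mask built here carries one bit per pattern byte ("this byte must match"). Adding a 2-byte field at offset 12 with a NULL mask therefore sets bits 12 and 13, i.e. flex->mask[1] |= 0x30; with an explicit mask, only bytes whose mask byte is non-zero get their bit set, so fully wildcarded bytes of the user data remain don't-care.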
+
+static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+ int i;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ for (i = 0; i < MAX_FLEX_FILTER; i++) {
+ if (i < 8) {
+ if (!(wufc & (IGC_WUFC_FLX0 << i)))
+ return i;
+ } else {
+ if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ if (wufc & IGC_WUFC_FILTER_MASK)
+ return true;
+
+ if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
+ return true;
+
+ return false;
+}
+
+static int igc_add_flex_filter(struct igc_adapter *adapter,
+ struct igc_nfc_rule *rule)
+{
+ struct igc_flex_filter flex = { };
+ struct igc_nfc_filter *filter = &rule->filter;
+ unsigned int eth_offset, user_offset;
+ int ret, index;
+ bool vlan;
+
+ index = igc_find_avail_flex_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ /* Construct the flex filter:
+ * -> dest_mac [6]
+ * -> src_mac [6]
+ * -> tpid [2]
+ * -> vlan tci [2]
+ * -> ether type [2]
+ * -> user data [8]
+ * => 26 bytes in total, rounded up to a filter length of 32
+ */
+ flex.index = index;
+ flex.length = 32;
+ flex.rx_queue = rule->action;
+
+ vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
+ eth_offset = vlan ? 16 : 12;
+ user_offset = vlan ? 18 : 14;
+
+ /* Add destination MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
+ ETH_ALEN, NULL);
+
+ /* Add source MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
+ ETH_ALEN, NULL);
+
+ /* Add VLAN etype */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
+ igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
+ sizeof(filter->vlan_etype),
+ NULL);
+
+ /* Add VLAN TCI */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
+ sizeof(filter->vlan_tci), NULL);
+
+ /* Add Ether type */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ __be16 etype = cpu_to_be16(filter->etype);
+
+ igc_flex_filter_add_field(&flex, &etype, eth_offset,
+ sizeof(etype), NULL);
+ }
+
+ /* Add user data */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
+ igc_flex_filter_add_field(&flex, &filter->user_data,
+ user_offset,
+ sizeof(filter->user_data),
+ filter->user_mask);
+
+ /* Add it down to the hardware and enable it. */
+ ret = igc_write_flex_filter_ll(adapter, &flex);
+ if (ret)
+ return ret;
+
+ filter->flex_index = index;
+
+ return 0;
+}
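Putting the offsets above together, a rule that matches a VLAN-tagged frame is laid out in the 32-byte flex pattern as: destination MAC at bytes 0-5, source MAC at 6-11, VLAN etype at 12-13, VLAN TCI at 14-15, EtherType at 16-17 and the 8 bytes of user data at 18-25, i.e. 26 bytes used and padded up to the 8-byte-aligned length of 32. Without a VLAN match the EtherType moves down to offset 12 and the user data to offset 14. Such rules reach the driver through ethtool's ntuple interface, via the igc_ethtool_* changes earlier in this patch.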
+
+static void igc_del_flex_filter(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc;
+
+ /* Just disable the filter; the filter table itself is left intact. A
+ * subsequent flex filter add will simply overwrite the stale entry.
+ */
+ if (reg_index >= 8) {
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc = rd32(IGC_WUFC);
+
+ wufc &= ~(IGC_WUFC_FLX0 << reg_index);
+ wr32(IGC_WUFC, wufc);
+ }
+
+ if (igc_flex_filter_in_use(adapter))
+ return;
+
+ /* No filters are in use, we may disable flex filters */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
- const struct igc_nfc_rule *rule)
+ struct igc_nfc_rule *rule)
{
int err;
+ if (rule->flex)
+ return igc_add_flex_filter(adapter, rule);
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
err = igc_add_etype_filter(adapter, rule->filter.etype,
rule->action);
@@ -3116,6 +3425,11 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
static void igc_disable_nfc_rule(struct igc_adapter *adapter,
const struct igc_nfc_rule *rule)
{
+ if (rule->flex) {
+ igc_del_flex_filter(adapter, rule->filter.flex_index);
+ return;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
igc_del_etype_filter(adapter, rule->filter.etype);
@@ -4811,6 +5125,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
*/
static int igc_request_msix(struct igc_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
int i = 0, err = 0, vector = 0, free_vector = 0;
struct net_device *netdev = adapter->netdev;
@@ -4819,7 +5134,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -4898,20 +5219,12 @@ bool igc_has_link(struct igc_adapter *adapter)
* false until the igc_check_for_link establishes link
* for copper adapters ONLY
*/
- switch (hw->phy.media_type) {
- case igc_media_type_copper:
- if (!hw->mac.get_link_status)
- return true;
- hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- break;
- default:
- case igc_media_type_unknown:
- break;
- }
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
@@ -4999,7 +5312,9 @@ static void igc_watchdog_task(struct work_struct *work)
adapter->tx_timeout_factor = 14;
break;
case SPEED_100:
- /* maybe add some timeout factor ? */
+ case SPEED_1000:
+ case SPEED_2500:
+ adapter->tx_timeout_factor = 7;
break;
}
@@ -5698,7 +6013,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_do_ioctl = igc_ioctl,
+ .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 83aeb5e7076f..5cad31c3c7b0 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
return ret_val;
}
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID)
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 0f82990567d9..828c3501c448 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -67,6 +67,9 @@
/* Filtering Registers */
#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */
+#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */
+#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */
/* ETQF register bit definitions */
#define IGC_ETQF_FILTER_ENABLE BIT(26)
@@ -75,6 +78,19 @@
#define IGC_ETQF_QUEUE_MASK 0x00070000
#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+/* FHFT register bit definitions */
+#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0)
+#define IGC_FHFT_QUEUE_SHIFT 8
+#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8)
+#define IGC_FHFT_PRIO_SHIFT 16
+#define IGC_FHFT_PRIO_MASK GENMASK(18, 16)
+#define IGC_FHFT_IMM_INT BIT(24)
+#define IGC_FHFT_DROP BIT(25)
+
+/* FHFTSL register bit definitions */
+#define IGC_FHFTSL_FTSL_SHIFT 0
+#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0)
+
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
/* RSS Random Key - RW Array */
@@ -240,6 +256,7 @@
#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */
/* Wake Up packet memory */
#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 174103c4bea6..4dbbb8a32ce9 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -52,7 +52,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
}
- wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 14aea40da50f..24e06ba6f5e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10247,7 +10247,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
- .ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_eth_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f1b9284e0bea..1251b74fe0e2 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2901,7 +2901,7 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_open = jme_open,
.ndo_stop = jme_close,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = jme_ioctl,
+ .ndo_eth_ioctl = jme_ioctl,
.ndo_start_xmit = jme_start_xmit,
.ndo_set_mac_address = jme_set_macaddr,
.ndo_set_rx_mode = jme_set_multi,
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index b30a45725374..3e9f324f1061 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1272,7 +1272,7 @@ static const struct net_device_ops korina_netdev_ops = {
.ndo_start_xmit = korina_send_packet,
.ndo_set_rx_mode = korina_multicast_list,
.ndo_tx_timeout = korina_tx_timeout,
- .ndo_do_ioctl = korina_ioctl,
+ .ndo_eth_ioctl = korina_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 2d0c52f7106b..62f8c5212182 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -609,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_stop = ltq_etop_stop,
.ndo_start_xmit = ltq_etop_tx,
.ndo_change_mtu = ltq_etop_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d207bfcaf31d..6502c5c2ebca 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3060,7 +3060,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mv643xx_eth_ioctl,
+ .ndo_eth_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_set_features = mv643xx_eth_set_features,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 76a7777c746d..ff8db311963c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2666,7 +2666,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
return 0;
if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
- pr_info("*** Is this even possible???!?!?\n");
+ pr_info("*** Is this even possible?\n");
return 0;
}
@@ -3832,12 +3832,20 @@ static void mvneta_validate(struct phylink_config *config,
struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_QSGMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !phy_interface_mode_is_rgmii(state->interface)) {
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
+ * When in 802.3z mode, we must have AN enabled:
+ * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ */
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ if (!phylink_test(state->advertising, Autoneg)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+ } else if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
return;
}
@@ -4986,7 +4994,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_change_mtu = mvneta_change_mtu,
.ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
- .ndo_do_ioctl = mvneta_ioctl,
+ .ndo_eth_ioctl = mvneta_ioctl,
.ndo_bpf = mvneta_xdp,
.ndo_xdp_xmit = mvneta_xdp_xmit,
.ndo_setup_tc = mvneta_setup_tc,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3229bafa2a2c..99bd8b8aa0e2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5702,7 +5702,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_mac_address = mvpp2_set_mac_address,
.ndo_change_mtu = mvpp2_change_mtu,
.ndo_get_stats64 = mvpp2_get_stats64,
- .ndo_do_ioctl = mvpp2_ioctl,
+ .ndo_eth_ioctl = mvpp2_ioctl,
.ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
.ndo_set_features = mvpp2_set_features,
@@ -6269,6 +6269,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
if (!mvpp2_port_supports_rgmii(port))
goto empty_set;
break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (!phylink_test(state->advertising, Autoneg))
+ goto empty_set;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 64aa7d350df1..6af97ce69443 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -14,6 +14,8 @@
#include <linux/tracepoint.h>
#include <linux/pci.h>
+#include "mbox.h"
+
TRACE_EVENT(otx2_msg_alloc,
TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
TP_ARGS(pdev, id, size),
@@ -25,8 +27,8 @@ TRACE_EVENT(otx2_msg_alloc,
__entry->id = id;
__entry->size = size;
),
- TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
- __entry->id, __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
);
TRACE_EVENT(otx2_msg_send,
@@ -88,8 +90,8 @@ TRACE_EVENT(otx2_msg_process,
__entry->id = id;
__entry->err = err;
),
- TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
- __entry->id, __entry->err)
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2c24944a4dba..22b7af029ebf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2331,7 +2331,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
- .ndo_do_ioctl = otx2_ioctl,
+ .ndo_eth_ioctl = otx2_ioctl,
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 226f4ff29f6e..7c569c1abefc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -746,7 +746,8 @@ static int prestera_netdev_port_event(struct net_device *lower,
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(upper)) {
if (info->linking)
- return prestera_bridge_port_join(upper, port);
+ return prestera_bridge_port_join(upper, port,
+ extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 0b3e8f2db294..be01ec8284e6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -480,7 +480,8 @@ err_port_flood_set:
}
int prestera_bridge_port_join(struct net_device *br_dev,
- struct prestera_port *port)
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
@@ -500,6 +501,11 @@ int prestera_bridge_port_join(struct net_device *br_dev,
goto err_brport_create;
}
+ err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
+ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
if (bridge->vlan_enabled)
return 0;
@@ -510,6 +516,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
return 0;
err_port_join:
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+err_switchdev_offload:
prestera_bridge_port_put(br_port);
err_brport_create:
prestera_bridge_put(bridge);
@@ -584,6 +592,8 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
else
prestera_bridge_1d_port_leave(br_port);
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+
prestera_hw_port_learning_set(port, false);
prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
index a91bc35d235f..0e93fda3d9a5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -8,7 +8,8 @@ int prestera_switchdev_init(struct prestera_switch *sw);
void prestera_switchdev_fini(struct prestera_switch *sw);
int prestera_bridge_port_join(struct net_device *br_dev,
- struct prestera_port *port);
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack);
void prestera_bridge_port_leave(struct net_device *br_dev,
struct prestera_port *port);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 9b48ae4bac39..fab53c9b8380 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1377,7 +1377,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
.ndo_set_mac_address = pxa168_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_change_mtu = pxa168_eth_change_mtu,
.ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index d4bb27ba1419..150c06ee3627 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3787,7 +3787,7 @@ static const struct net_device_ops skge_netdev_ops = {
.ndo_open = skge_up,
.ndo_stop = skge_down,
.ndo_start_xmit = skge_xmit_frame,
- .ndo_do_ioctl = skge_ioctl,
+ .ndo_eth_ioctl = skge_ioctl,
.ndo_get_stats = skge_get_stats,
.ndo_tx_timeout = skge_tx_timeout,
.ndo_change_mtu = skge_change_mtu,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 8b8bff59c8fe..743ca96527fa 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4693,7 +4693,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_open = sky2_open,
.ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
- .ndo_do_ioctl = sky2_ioctl,
+ .ndo_eth_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_rx_mode = sky2_set_multicast,
@@ -4710,7 +4710,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_open = sky2_open,
.ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
- .ndo_do_ioctl = sky2_ioctl,
+ .ndo_eth_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_rx_mode = sky2_set_multicast,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 64adfd24e134..398c23cec815 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2933,7 +2933,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_start_xmit = mtk_start_xmit,
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_eth_ioctl = mtk_do_ioctl,
.ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 96d2891f1675..1d5dd2015453 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1162,7 +1162,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
.ndo_start_xmit = mtk_star_netdev_start_xmit,
.ndo_get_stats64 = mtk_star_netdev_get_stats64,
.ndo_set_rx_mode = mtk_star_set_rx_mode,
- .ndo_do_ioctl = mtk_star_netdev_ioctl,
+ .ndo_eth_ioctl = mtk_star_netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5d0c9c62382d..a2f61a87cef8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2828,7 +2828,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
- .ndo_do_ioctl = mlx4_en_ioctl,
+ .ndo_eth_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b5072a3a2585..6378dc815df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -27,7 +27,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/rqt.o en/tir.o \
+ en/rx_res.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b1b51bbba054..35668986878a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -58,6 +58,7 @@
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"
+#include "en/rx_res.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -126,7 +127,6 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
@@ -139,10 +139,7 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
-#define MLX5E_LOG_INDIR_RQT_SIZE 0x8
-#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -745,29 +742,11 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-struct mlx5e_rqt {
- u32 rqtn;
- bool enabled;
-};
-
-struct mlx5e_tir {
- u32 tirn;
- struct mlx5e_rqt rqt;
- struct list_head list;
-};
-
enum {
MLX5E_TC_PRIO = 0,
MLX5E_NIC_PRIO
};
-struct mlx5e_rss_params {
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
- u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
- u8 toeplitz_hash_key[40];
- u8 hfunc;
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -837,13 +816,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
- struct mlx5e_rqt indir_rqt;
- struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_tir ptp_tir;
- struct mlx5e_rss_params rss_params;
+ struct mlx5e_rx_res *rx_res;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
@@ -948,24 +921,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-struct mlx5e_redirect_rqt_param {
- bool is_rss;
- union {
- u32 rqn; /* Direct RQN (Non-RSS) */
- struct {
- u8 hfunc;
- struct mlx5e_channels *channels;
- } rss; /* RSS data */
- };
-};
-
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
- struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
- const struct mlx5e_tirc_config *ttconfig,
- void *tirc, bool inner);
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
-struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
+int mlx5e_modify_tirs_hash(struct mlx5e_priv *priv);
struct mlx5e_xsk_param;
@@ -1065,10 +1021,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
-int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
- u32 *in);
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1089,11 +1041,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1106,7 +1057,6 @@ int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
-int mlx5e_bits_invert(unsigned long a, int size);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index bc33eaada3b9..86e079310ac3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -55,19 +55,15 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
- if (dl_port->registered)
- devlink_port_unregister(dl_port);
+ devlink_port_unregister(dl_port);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct devlink_port *port;
if (!netif_device_present(dev))
return NULL;
- port = mlx5e_devlink_get_dl_port(priv);
- if (port->registered)
- return port;
- return NULL;
+
+ return mlx5e_devlink_get_dl_port(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1d5ce07b83f4..0e053aab12b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -83,12 +83,6 @@ enum mlx5e_traffic_types {
MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
-struct mlx5e_tirc_config {
- u8 l3_prot_type;
- u8 l4_prot_type;
- u32 rx_hash_fields;
-};
-
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
@@ -160,6 +154,8 @@ enum {
MLX5E_INNER_TTC_GROUP2_SIZE +\
MLX5E_INNER_TTC_GROUP3_SIZE)
+struct mlx5e_priv;
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
@@ -248,18 +244,12 @@ struct ttc_params {
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
struct mlx5_flow_destination *new_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 2cbf18c967f7..3cbb596821e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -167,6 +167,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
+{
+ struct mlx5e_lro_param lro_param;
+
+ lro_param = (struct mlx5e_lro_param) {
+ .enabled = params->lro_en,
+ .timeout = params->lro_timeout,
+ };
+
+ return lro_param;
+}
+
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index e9593f5f0661..879ad46d754e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -11,6 +11,11 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
+struct mlx5e_lro_param {
+ bool enabled;
+ u32 timeout;
+};
+
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -120,6 +125,7 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index efef4adce086..8ff8b02c056f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -606,8 +606,8 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ u32 tirn = priv->rx_res->ptp.tir.tirn;
struct mlx5_flow_handle *rule;
- u32 tirn = priv->ptp_tir.tirn;
int err;
if (ptp_fs->valid)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
new file mode 100644
index 000000000000..38d0e9dbd6bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rqt.h"
+#include <linux/mlx5/transobj.h>
+
+static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u16 max_size, u32 *init_rqns, u16 init_size)
+{
+ void *rqtc;
+ int inlen;
+ int err;
+ u32 *in;
+ int i;
+
+ rqt->mdev = mdev;
+ rqt->size = max_size;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
+ for (i = 0; i < init_size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn)
+{
+ u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+ return inv;
+}
+
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ unsigned int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ unsigned int ix = i;
+
+ if (hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+
+ ix = indir->table[ix];
+
+ if (WARN_ON(ix >= num_rqns))
+ /* Could be a bug in the driver or in the kernel part of
+ * ethtool: indir table refers to non-existent RQs.
+ */
+ return -EINVAL;
+ rss_rqns[i] = rqns[ix];
+ }
+
+ return 0;
+}
+
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
+
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
+{
+ mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
+}
+
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+{
+ unsigned int i;
+ void *rqtc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+{
+ return mlx5e_rqt_redirect(rqt, &rqn, 1);
+}
+
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+ return -EINVAL;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
new file mode 100644
index 000000000000..d2c76649efb0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RQT_H__
+#define __MLX5_EN_RQT_H__
+
+#include <linux/kernel.h>
+
+#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+
+struct mlx5_core_dev;
+
+struct mlx5e_rss_params_indir {
+ u32 table[MLX5E_INDIR_RQT_SIZE];
+};
+
+struct mlx5e_rqt {
+ struct mlx5_core_dev *mdev;
+ u32 rqtn;
+ u16 size;
+};
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn);
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
+
+static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
+{
+ return rqt->rqtn;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+
+#endif /* __MLX5_EN_RQT_H__ */
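(Editorial illustration, not part of the patch: the header above declares the whole RQT lifecycle. A minimal sketch of how a caller could use it, assuming an existing mlx5_core_dev and RQ numbers such as the drop RQ used in the en_main.c hunks further below; the names mdev, drop_rqn and new_rqn are placeholders.)

/* Illustrative only: create a single-entry (direct) RQT, retarget it, destroy it. */
static int example_rqt_usage(struct mlx5_core_dev *mdev, u32 drop_rqn, u32 new_rqn)
{
	struct mlx5e_rqt rqt;
	int err;

	/* Start the RQT pointing at the drop RQ, as the driver does before channels open. */
	err = mlx5e_rqt_init_direct(&rqt, mdev, false, drop_rqn);
	if (err)
		return err;

	/* Later, redirect the RQT to a live RQ, e.g. when a channel is activated. */
	err = mlx5e_rqt_redirect_direct(&rqt, new_rqn);

	mlx5e_rqt_destroy(&rqt);
	return err;
}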
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
new file mode 100644
index 000000000000..8fc1dfc4e830
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rx_res.h"
+
+static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
+ [MLX5E_TT_IPV4_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+ [MLX5E_TT_IPV6] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+};
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5e_traffic_types tt)
+{
+ return rss_default_config[tt];
+}
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rx_res_rss_get_current_tt_config(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+
+ rss_tt = mlx5e_rss_get_default_tt_config(tt);
+ rss_tt.rx_hash_fields = res->rss_params.rx_hash_fields[tt];
+ return rss_tt;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
new file mode 100644
index 000000000000..068e48140a6f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RX_RES_H__
+#define __MLX5_EN_RX_RES_H__
+
+#include <linux/kernel.h>
+#include "rqt.h"
+#include "tir.h"
+#include "fs.h"
+
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
+
+struct mlx5e_rss_params {
+ struct mlx5e_rss_params_hash hash;
+ struct mlx5e_rss_params_indir indir;
+ u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+};
+
+struct mlx5e_rx_res {
+ struct mlx5e_rss_params rss_params;
+
+ struct mlx5e_rqt indir_rqt;
+ struct {
+ struct mlx5e_tir indir_tir;
+ struct mlx5e_tir inner_indir_tir;
+ } rss[MLX5E_NUM_INDIR_TIRS];
+
+ struct {
+ struct mlx5e_rqt direct_rqt;
+ struct mlx5e_tir direct_tir;
+ struct mlx5e_rqt xsk_rqt;
+ struct mlx5e_tir xsk_tir;
+ } channels[MLX5E_MAX_NUM_CHANNELS];
+
+ struct {
+ struct mlx5e_rqt rqt;
+ struct mlx5e_tir tir;
+ } ptp;
+};
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5e_traffic_types tt);
+struct mlx5e_rss_params_traffic_type
+mlx5e_rx_res_rss_get_current_tt_config(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt);
+
+#endif /* __MLX5_EN_RX_RES_H__ */
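(Editorial illustration, not part of the patch: the aggregate above replaces the per-field arrays that used to live in struct mlx5e_priv. A tiny sketch of how later hunks, e.g. in en_fs.c and ktls_rx.c, look up hardware object numbers through it; ch_ix is a placeholder channel index.)

/* Illustrative only: fetch the TIR number of one channel's direct TIR. */
static u32 example_direct_tirn(struct mlx5e_rx_res *res, unsigned int ch_ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ch_ix].direct_tir);
}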
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
new file mode 100644
index 000000000000..de936dc4bc48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "tir.h"
+#include "params.h"
+#include <linux/mlx5/transobj.h>
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+
+/* max() doesn't work inside square brackets. */
+#define MLX5E_TIR_CMD_IN_SZ_DW ( \
+ MLX5_ST_SZ_DW(create_tir_in) > MLX5_ST_SZ_DW(modify_tir_in) ? \
+ MLX5_ST_SZ_DW(create_tir_in) : MLX5_ST_SZ_DW(modify_tir_in) \
+)
+
+struct mlx5e_tir_builder {
+ u32 in[MLX5E_TIR_CMD_IN_SZ_DW];
+ bool modify;
+};
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
+{
+ struct mlx5e_tir_builder *builder;
+
+ builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+ builder->modify = modify;
+
+ return builder;
+}
+
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder)
+{
+ kvfree(builder);
+}
+
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder)
+{
+ memset(builder->in, 0, sizeof(builder->in));
+}
+
+static void *mlx5e_tir_builder_get_tirc(struct mlx5e_tir_builder *builder)
+{
+ if (builder->modify)
+ return MLX5_ADDR_OF(modify_tir_in, builder->in, ctx);
+ return MLX5_ADDR_OF(create_tir_in, builder->in, ctx);
+}
+
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+}
+
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
+}
+
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ const unsigned int rough_max_l2_l3_hdr_sz = 256;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
+
+ if (!lro_param->enabled)
+ return;
+
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+}
+
+static int mlx5e_hfunc_to_hw(u8 hfunc)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return MLX5_RX_HASH_FN_TOEPLITZ;
+ case ETH_RSS_HASH_XOR:
+ return MLX5_RX_HASH_FN_INVERTED_XOR8;
+ default:
+ return MLX5_RX_HASH_FN_NONE;
+ }
+}
+
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ void *hfso;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.hash, 1);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_hfunc_to_hw(rss_hash->hfunc));
+ if (rss_hash->hfunc == ETH_RSS_HASH_TOP) {
+ const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
+ }
+
+ if (inner)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, rss_tt->l3_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, rss_tt->l4_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, rss_tt->rx_hash_fields);
+}
+
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, tls_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+}
+
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg)
+{
+ int err;
+
+ tir->mdev = mdev;
+
+ err = mlx5_core_create_tir(tir->mdev, builder->in, &tir->tirn);
+ if (err)
+ return err;
+
+ if (reg) {
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+ mutex_lock(&res->td.list_lock);
+ list_add(&tir->list, &res->td.tirs_list);
+ mutex_unlock(&res->td.list_lock);
+ } else {
+ INIT_LIST_HEAD(&tir->list);
+ }
+
+ return 0;
+}
+
+void mlx5e_tir_destroy(struct mlx5e_tir *tir)
+{
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+ /* Skip mutex if list_del is no-op (the TIR wasn't registered in the
+ * list). list_empty will never return true for an item of tirs_list,
+ * and READ_ONCE/WRITE_ONCE in list_empty/list_del guarantee consistency
+ * of the list->next value.
+ */
+ if (!list_empty(&tir->list)) {
+ mutex_lock(&res->td.list_lock);
+ list_del(&tir->list);
+ mutex_unlock(&res->td.list_lock);
+ }
+
+ mlx5_core_destroy_tir(tir->mdev, tir->tirn);
+}
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder)
+{
+ return mlx5_core_modify_tir(tir->mdev, tir->tirn, builder->in);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
new file mode 100644
index 000000000000..e45149a78ed9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_TIR_H__
+#define __MLX5_EN_TIR_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_rss_params_hash {
+ u8 hfunc;
+ u8 toeplitz_hash_key[40];
+};
+
+struct mlx5e_rss_params_traffic_type {
+ u8 l3_prot_type;
+ u8 l4_prot_type;
+ u32 rx_hash_fields;
+};
+
+struct mlx5e_tir_builder;
+struct mlx5e_lro_param;
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
+
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support);
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner);
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
+
+struct mlx5_core_dev;
+
+struct mlx5e_tir {
+ struct mlx5_core_dev *mdev;
+ u32 tirn;
+ struct list_head list;
+};
+
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg);
+void mlx5e_tir_destroy(struct mlx5e_tir *tir);
+
+static inline u32 mlx5e_tir_get_tirn(struct mlx5e_tir *tir)
+{
+ return tir->tirn;
+}
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder);
+
+#endif /* __MLX5_EN_TIR_H__ */
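(Editorial illustration, not part of the patch: the builder declared above is used in an alloc/build/init/free sequence, as the trap.c and ktls_rx.c hunks below switch to. A minimal direct-TIR sketch, assuming the caller already owns mdev, a transport domain tdn and an RQT number rqtn; those names are placeholders.)

/* Illustrative only: build and create a direct TIR on top of an existing RQT. */
static int example_direct_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
			      u32 tdn, u32 rqtn)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);	/* create context, not modify */
	if (!builder)
		return -ENOMEM;

	mlx5e_tir_builder_build_rqt(builder, tdn, rqtn, false);
	mlx5e_tir_builder_build_direct(builder);	/* inverted-XOR8 hash function */

	err = mlx5e_tir_init(tir, builder, mdev, true);	/* true: register on tirs_list */

	mlx5e_tir_builder_free(builder);
	return err;
}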
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 7f94508594fb..d54607a42740 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -92,30 +92,19 @@ static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
u32 rqn)
{
- void *tirc;
- int inlen;
- u32 *in;
+ struct mlx5e_tir_builder *builder;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn, rqn);
- err = mlx5e_create_tir(mdev, tir, in);
- kvfree(in);
+ mlx5e_tir_builder_build_inline(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqn);
+ err = mlx5e_tir_init(tir, builder, mdev, true);
- return err;
-}
+ mlx5e_tir_builder_free(builder);
-static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
-{
- mlx5e_destroy_tir(mdev, tir);
+ return err;
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
@@ -173,7 +162,7 @@ err_napi_del:
void mlx5e_close_trap(struct mlx5e_trap *trap)
{
- mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
netif_napi_del(&trap->napi);
kvfree(trap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index a8315f166696..ab485d082729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -184,28 +184,14 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
/* TX queue is disabled on close. */
}
-static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
-{
- struct mlx5e_redirect_rqt_param direct_rrp = {
- .is_rss = false,
- {
- .rqn = rqn,
- },
- };
-
- u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;
-
- return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
-}
-
int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
- return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
+ return mlx5e_rqt_redirect_direct(&priv->rx_res->channels[c->ix].xsk_rqt, c->xskrq.rqn);
}
int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
- return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
+ return mlx5e_rqt_redirect_direct(&priv->rx_res->channels[ix].xsk_rqt, priv->drop_rq.rqn);
}
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 4e58fade7a60..bfdbc3060755 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -49,7 +49,7 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
- u32 tirn;
+ struct mlx5e_tir tir;
u32 key_id;
u32 rxq;
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -99,31 +99,22 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
{
- int err, inlen;
- void *tirc;
- u32 *in;
+ struct mlx5e_tir_builder *builder;
+ int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
- MLX5_SET(tirc, tirc, tls_en, 1);
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+ mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
+ mlx5e_tir_builder_build_direct(builder);
+ mlx5e_tir_builder_build_tls(builder);
+ err = mlx5e_tir_init(tir, builder, mdev, false);
- err = mlx5_core_create_tir(mdev, in, tirn);
+ mlx5e_tir_builder_free(builder);
- kvfree(in);
return err;
}
@@ -139,7 +130,8 @@ static void accel_rule_handle_work(struct work_struct *work)
goto out;
rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
- priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
accel_rule->rule = rule;
out:
@@ -173,8 +165,8 @@ post_static_params(struct mlx5e_icosq *sq,
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
- priv_rx->tirn, priv_rx->key_id,
- priv_rx->resync.seq, false,
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ priv_rx->key_id, priv_rx->resync.seq, false,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -202,8 +194,9 @@ post_progress_params(struct mlx5e_icosq *sq,
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
- mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
- next_record_tcp_sn,
+ mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ false, next_record_tcp_sn,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
@@ -325,7 +318,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
psv = &wqe->psv;
psv->num_psv = 1 << 4;
psv->l_key = sq->channel->mkey_be;
- psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
+ psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
psv->va = cpu_to_be64(buf->dma_addr);
wi = (struct mlx5e_icosq_wqe_info) {
@@ -635,9 +628,9 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rqtn = priv->direct_tir[rxq].rqt.rqtn;
+ rqtn = mlx5e_rqt_get_rqtn(&priv->rx_res->channels[rxq].direct_rqt);
- err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
+ err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
if (err)
goto err_create_tir;
@@ -658,7 +651,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
return 0;
err_post_wqes:
- mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
@@ -693,7 +686,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
if (priv_rx->rule.rule)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
- mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ mlx5e_tir_destroy(&priv_rx->tir);
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
/* priv_rx should normally be freed here, but if there is an outstanding
* GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 25403af32859..db6c6a96a6c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -192,7 +192,6 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
- struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5e_traffic_types tt;
@@ -209,7 +208,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
/* FIXME: Must use mlx5e_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
- dest.tir_num = tir[tt].tirn;
+ dest.tir_num = priv->rx_res->rss[tt].indir_tir.tirn;
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
@@ -553,7 +552,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+ dest.tir_num = priv->rx_res->channels[arfs_rule->rxq].direct_tir.tirn;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -576,7 +575,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst.tir_num = priv->direct_tir[rxq].tirn;
+ dst.tir_num = priv->rx_res->channels[rxq].direct_tir.tirn;
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 8c166ee56d8b..c4db367d4baf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -36,33 +36,6 @@
* Global resources are common to all the netdevices created on the same NIC.
*/
-int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
-{
- struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
- int err;
-
- err = mlx5_core_create_tir(mdev, in, &tir->tirn);
- if (err)
- return err;
-
- mutex_lock(&res->td.list_lock);
- list_add(&tir->list, &res->td.tirs_list);
- mutex_unlock(&res->td.list_lock);
-
- return 0;
-}
-
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir)
-{
- struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
-
- mutex_lock(&res->td.list_lock);
- mlx5_core_destroy_tir(mdev, tir->tirn);
- list_del(&tir->list);
- mutex_unlock(&res->td.list_lock);
-}
-
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bd72572e03d1..9264d18b0964 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1172,7 +1172,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
- return sizeof(priv->rss_params.toeplitz_hash_key);
+ return sizeof(priv->rx_res->rss_params.hash.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
@@ -1198,18 +1198,18 @@ int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
+ struct mlx5e_rss_params *rss;
+
+ rss = &priv->rx_res->rss_params;
if (indir)
- memcpy(indir, rss->indirection_rqt,
- sizeof(rss->indirection_rqt));
+ memcpy(indir, rss->indir.table, sizeof(rss->indir.table));
if (key)
- memcpy(key, rss->toeplitz_hash_key,
- sizeof(rss->toeplitz_hash_key));
+ memcpy(key, rss->hash.toeplitz_hash_key, sizeof(rss->hash.toeplitz_hash_key));
if (hfunc)
- *hfunc = rss->hfunc;
+ *hfunc = rss->hash.hfunc;
return 0;
}
@@ -1218,63 +1218,57 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
- int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ struct mlx5e_rss_params *rss;
bool refresh_tirs = false;
bool refresh_rqt = false;
- void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
(hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
mutex_lock(&priv->state_lock);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
- rss->hfunc = hfunc;
+ rss = &priv->rx_res->rss_params;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hash.hfunc) {
+ rss->hash.hfunc = hfunc;
refresh_rqt = true;
refresh_tirs = true;
}
if (indir) {
- memcpy(rss->indirection_rqt, indir,
- sizeof(rss->indirection_rqt));
+ memcpy(rss->indir.table, indir, sizeof(rss->indir.table));
refresh_rqt = true;
}
if (key) {
- memcpy(rss->toeplitz_hash_key, key,
- sizeof(rss->toeplitz_hash_key));
- refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP;
+ memcpy(rss->hash.toeplitz_hash_key, key, sizeof(rss->hash.toeplitz_hash_key));
+ refresh_tirs = refresh_tirs || rss->hash.hfunc == ETH_RSS_HASH_TOP;
}
if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_redirect_rqt_param rrp = {
- .is_rss = true,
- {
- .rss = {
- .hfunc = rss->hfunc,
- .channels = &priv->channels,
- },
- },
- };
- u32 rqtn = priv->indir_rqt.rqtn;
-
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+ u32 *rqns;
+
+ rqns = kvmalloc_array(priv->channels.num, sizeof(*rqns), GFP_KERNEL);
+ if (rqns) {
+ unsigned int ix;
+
+ for (ix = 0; ix < priv->channels.num; ix++)
+ rqns[ix] = priv->channels.c[ix]->rq.rqn;
+
+ mlx5e_rqt_redirect_indir(&priv->rx_res->indir_rqt, rqns,
+ priv->channels.num,
+ rss->hash.hfunc, &rss->indir);
+ kvfree(rqns);
+ }
}
if (refresh_tirs)
- mlx5e_modify_tirs_hash(priv, in);
+ mlx5e_modify_tirs_hash(priv);
mutex_unlock(&priv->state_lock);
- kvfree(in);
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0b75fab41ae8..e79815763edf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1320,11 +1320,11 @@ err:
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
struct ttc_params *ttc_params)
{
- ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params->any_tt_tirn = priv->rx_res->channels[0].direct_tir.tirn;
ttc_params->inner_ttc = &priv->fs.inner_ttc;
}
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
@@ -1343,15 +1343,12 @@ void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
ft_attr->prio = MLX5E_NIC_PRIO;
}
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc)
+static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return 0;
-
ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1374,12 +1371,9 @@ err:
return err;
}
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc)
+static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return;
-
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
@@ -1788,20 +1782,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
}
mlx5e_set_ttc_basic_params(priv, &ttc_params);
- mlx5e_set_inner_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
- if (err) {
- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
- err);
- goto err_destroy_arfs_tables;
+ if (mlx5e_tunnel_inner_ft_supported(priv->mdev)) {
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].inner_indir_tir.tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
}
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+ ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].indir_tir.tirn;
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
@@ -1839,7 +1836,8 @@ err_destroy_l2_table:
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
+ if (mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1852,7 +1850,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
+ if (mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index b416a8ee2eed..494f6f832407 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -421,11 +421,9 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
} else {
struct mlx5e_params *params = &priv->channels.params;
enum mlx5e_rq_group group;
- struct mlx5e_tir *tir;
u16 ix;
mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
- tir = group == MLX5E_RQ_GROUP_XSK ? priv->xsk_tir : priv->direct_tir;
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
@@ -434,7 +432,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
}
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst->tir_num = tir[ix].tirn;
+ if (group == MLX5E_RQ_GROUP_XSK)
+ dst->tir_num = priv->rx_res->channels[ix].xsk_tir.tirn;
+ else
+ dst->tir_num = priv->rx_res->channels[ix].direct_tir.tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
@@ -816,10 +817,8 @@ static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
enum mlx5e_traffic_types tt;
u8 rx_hash_field = 0;
- void *in;
tt = flow_type_to_traffic_type(nfc->flow_type);
if (tt == MLX5E_NUM_INDIR_TIRS)
@@ -848,21 +847,16 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
if (nfc->data & RXH_L4_B_2_3)
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
mutex_lock(&priv->state_lock);
- if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
+ if (rx_hash_field == priv->rx_res->rss_params.rx_hash_fields[tt])
goto out;
- priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
- mlx5e_modify_tirs_hash(priv, in);
+ priv->rx_res->rss_params.rx_hash_fields[tt] = rx_hash_field;
+ mlx5e_modify_tirs_hash(priv);
out:
mutex_unlock(&priv->state_lock);
- kvfree(in);
return 0;
}
@@ -876,7 +870,7 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
if (tt == MLX5E_NUM_INDIR_TIRS)
return -EINVAL;
- hash_field = priv->rss_params.rx_hash_fields[tt];
+ hash_field = priv->rx_res->rss_params.rx_hash_fields[tt];
nfc->data = 0;
if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 37c440837945..3e644d3955a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2194,61 +2194,25 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
-static int
-mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- int inlen;
- int err;
- u32 *in;
- int i;
-
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
-
- for (i = 0; i < sz; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
-
- err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
- if (!err)
- rqt->enabled = true;
-
- kvfree(in);
- return err;
-}
-
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
-{
- rqt->enabled = false;
- mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
-}
-
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
- struct mlx5e_rqt *rqt = &priv->indir_rqt;
int err;
- err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
+ err = mlx5e_rqt_init_direct(&priv->rx_res->indir_rqt, priv->mdev, true,
+ priv->drop_rq.rqn);
if (err)
mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
return err;
}
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
int err;
int ix;
- for (ix = 0; ix < n; ix++) {
- err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
+ for (ix = 0; ix < priv->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&priv->rx_res->channels[ix].direct_rqt,
+ priv->mdev, false, priv->drop_rq.rqn);
if (unlikely(err))
goto err_destroy_rqts;
}
@@ -2256,337 +2220,175 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, in
return 0;
err_destroy_rqts:
- mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
- for (ix--; ix >= 0; ix--)
- mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
+ mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&priv->rx_res->channels[ix].direct_rqt);
return err;
}
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
+static int mlx5e_create_xsk_rqts(struct mlx5e_priv *priv)
{
- int i;
-
- for (i = 0; i < n; i++)
- mlx5e_destroy_rqt(priv, &tirs[i].rqt);
-}
+ int err;
+ int ix;
-static int mlx5e_rx_hash_fn(int hfunc)
-{
- return (hfunc == ETH_RSS_HASH_TOP) ?
- MLX5_RX_HASH_FN_TOEPLITZ :
- MLX5_RX_HASH_FN_INVERTED_XOR8;
-}
+ for (ix = 0; ix < priv->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&priv->rx_res->channels[ix].xsk_rqt,
+ priv->mdev, false, priv->drop_rq.rqn);
+ if (unlikely(err))
+ goto err_destroy_rqts;
+ }
-int mlx5e_bits_invert(unsigned long a, int size)
-{
- int inv = 0;
- int i;
+ return 0;
- for (i = 0; i < size; i++)
- inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+err_destroy_rqts:
+ mlx5_core_warn(priv->mdev, "create xsk rqts failed, %d\n", err);
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&priv->rx_res->channels[ix].xsk_rqt);
- return inv;
+ return err;
}
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
- struct mlx5e_redirect_rqt_param rrp, void *rqtc)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
- int i;
-
- for (i = 0; i < sz; i++) {
- u32 rqn;
-
- if (rrp.is_rss) {
- int ix = i;
-
- if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, ilog2(sz));
+ unsigned int ix;
- ix = priv->rss_params.indirection_rqt[ix];
- rqn = rrp.rss.channels->c[ix]->rq.rqn;
- } else {
- rqn = rrp.rqn;
- }
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
+ for (ix = 0; ix < priv->max_nch; ix++)
+ mlx5e_rqt_destroy(&priv->rx_res->channels[ix].direct_rqt);
}
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
- struct mlx5e_redirect_rqt_param rrp)
+static void mlx5e_destroy_xsk_rqts(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- int inlen;
- u32 *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
- mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
- err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
+ unsigned int ix;
- kvfree(in);
- return err;
+ for (ix = 0; ix < priv->max_nch; ix++)
+ mlx5e_rqt_destroy(&priv->rx_res->channels[ix].xsk_rqt);
}
-static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
- struct mlx5e_redirect_rqt_param rrp)
+static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs)
{
- if (!rrp.is_rss)
- return rrp.rqn;
-
- if (ix >= rrp.rss.channels->num)
- return priv->drop_rq.rqn;
-
- return rrp.rss.channels->c[ix]->rq.rqn;
-}
+ struct mlx5e_rx_res *res = priv->rx_res;
+ unsigned int ix;
+ u32 *rqns;
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
- struct mlx5e_redirect_rqt_param rrp,
- struct mlx5e_redirect_rqt_param *ptp_rrp)
-{
- u32 rqtn;
- int ix;
+ rqns = kvmalloc_array(chs->num, sizeof(*rqns), GFP_KERNEL);
+ if (rqns) {
+ for (ix = 0; ix < chs->num; ix++)
+ rqns[ix] = chs->c[ix]->rq.rqn;
- if (priv->indir_rqt.enabled) {
- /* RSS RQ table */
- rqtn = priv->indir_rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+ mlx5e_rqt_redirect_indir(&res->indir_rqt, rqns, chs->num,
+ res->rss_params.hash.hfunc,
+ &res->rss_params.indir);
+ kvfree(rqns);
}
for (ix = 0; ix < priv->max_nch; ix++) {
- struct mlx5e_redirect_rqt_param direct_rrp = {
- .is_rss = false,
- {
- .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
- },
- };
+ u32 rqn = priv->drop_rq.rqn;
- /* Direct RQ Tables */
- if (!priv->direct_tir[ix].rqt.enabled)
- continue;
+ if (ix < chs->num)
+ rqn = chs->c[ix]->rq.rqn;
- rqtn = priv->direct_tir[ix].rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
- }
- if (ptp_rrp) {
- rqtn = priv->ptp_tir.rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp);
+ mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
}
-}
-static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *chs)
-{
- bool rx_ptp_support = priv->profile->rx_ptp_support;
- struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL;
- struct mlx5e_redirect_rqt_param rrp = {
- .is_rss = true,
- {
- .rss = {
- .channels = chs,
- .hfunc = priv->rss_params.hfunc,
- }
- },
- };
- struct mlx5e_redirect_rqt_param ptp_rrp;
+ if (priv->profile->rx_ptp_support) {
+ u32 rqn;
- if (rx_ptp_support) {
- u32 ptp_rqn;
+ if (mlx5e_ptp_get_rqn(priv->channels.ptp, &rqn))
+ rqn = priv->drop_rq.rqn;
- ptp_rrp.is_rss = false;
- ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ?
- priv->drop_rq.rqn : ptp_rqn;
- ptp_rrp_p = &ptp_rrp;
+ mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
}
- mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p);
}
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
- bool rx_ptp_support = priv->profile->rx_ptp_support;
- struct mlx5e_redirect_rqt_param drop_rrp = {
- .is_rss = false,
- {
- .rqn = priv->drop_rq.rqn,
- },
- };
-
- mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL);
-}
-
-static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
- [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP,
- },
- [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP,
- },
-};
-
-struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
-{
- return tirc_default_config[tt];
-}
+ struct mlx5e_rx_res *res = priv->rx_res;
+ unsigned int ix;
-static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
-{
- if (!params->lro_en)
- return;
-
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
+ mlx5e_rqt_redirect_direct(&res->indir_rqt, priv->drop_rq.rqn);
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
-}
-
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
- const struct mlx5e_tirc_config *ttconfig,
- void *tirc, bool inner)
-{
- void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
- MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
- MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
- if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
- void *rss_key = MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key);
- size_t len = MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key);
-
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, rss_params->toeplitz_hash_key, len);
- }
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- ttconfig->l3_prot_type);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- ttconfig->l4_prot_type);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- ttconfig->rx_hash_fields);
-}
+ for (ix = 0; ix < priv->max_nch; ix++)
+ mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, priv->drop_rq.rqn);
-static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
- enum mlx5e_traffic_types tt,
- u32 rx_hash_fields)
-{
- *ttconfig = tirc_default_config[tt];
- ttconfig->rx_hash_fields = rx_hash_fields;
+ if (priv->profile->rx_ptp_support)
+ mlx5e_rqt_redirect_direct(&res->ptp.rqt, priv->drop_rq.rqn);
}
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
+int mlx5e_modify_tirs_hash(struct mlx5e_priv *priv)
{
- void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- struct mlx5e_rss_params *rss = &priv->rss_params;
- struct mlx5_core_dev *mdev = priv->mdev;
- int ctxlen = MLX5_ST_SZ_BYTES(tirc);
- struct mlx5e_tirc_config ttconfig;
- int tt;
+ struct mlx5e_rss_params_hash *rss_hash = &priv->rx_res->rss_params.hash;
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_rx_res *res = priv->rx_res;
+ struct mlx5e_tir_builder *builder;
+ enum mlx5e_traffic_types tt;
- MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_update_rx_hash_fields(&ttconfig, tt,
- rss->rx_hash_fields[tt]);
- mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
+ rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
+ mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);
+ mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
+ mlx5e_tir_builder_clear(builder);
}
/* Verify inner tirs resources allocated */
- if (!priv->inner_indir_tir[0].tirn)
- return;
+ if (!res->rss[0].inner_indir_tir.tirn)
+ goto out;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_update_rx_hash_fields(&ttconfig, tt,
- rss->rx_hash_fields[tt]);
- mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
+ rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
+ mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, true);
+ mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
+ mlx5e_tir_builder_clear(builder);
}
+
+out:
+ mlx5e_tir_builder_free(builder);
+ return 0;
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
- void *in;
- void *tirc;
- int inlen;
+ struct mlx5e_rx_res *res = priv->rx_res;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_lro_param lro_param;
+ enum mlx5e_traffic_types tt;
int err;
- int tt;
int ix;
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
return -ENOMEM;
- MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
- tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ mlx5e_tir_builder_build_lro(builder, &lro_param);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
+ err = mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
if (err)
- goto free_in;
+ goto err_free_builder;
+
+ /* Verify inner tirs resources allocated */
+ if (!res->rss[0].inner_indir_tir.tirn)
+ continue;
+
+ err = mlx5e_tir_modify(&res->rss[tt].inner_indir_tir, builder);
+ if (err)
+ goto err_free_builder;
}
for (ix = 0; ix < priv->max_nch; ix++) {
- err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
+ err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
if (err)
- goto free_in;
+ goto err_free_builder;
}
-free_in:
- kvfree(in);
-
+err_free_builder:
+ mlx5e_tir_builder_free(builder);
return err;
}
@@ -2768,8 +2570,9 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ /* This function may be called on attach, before priv->rx_res is created. */
+ if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
+ mlx5e_build_default_indir_rqt(priv->rx_res->rss_params.indir.table,
MLX5E_INDIR_RQT_SIZE, count);
return 0;
@@ -2829,16 +2632,19 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
- mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
- mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
+ if (priv->rx_res) {
+ mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
+ mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
+ }
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
-
- mlx5e_redirect_rqts_to_drop(priv);
+ if (priv->rx_res) {
+ mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
+ mlx5e_redirect_rqts_to_drop(priv);
+ }
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -3213,159 +3019,192 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
- u32 rqtn, u32 *tirc)
-{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
- MLX5_SET(tirc, tirc, tunneled_offload_en,
- priv->channels.params.tunneled_offload_en);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-}
-
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
-{
- mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, false);
-}
-
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
-{
- mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-}
-
-static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
-{
- mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, true);
-}
-
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
- struct mlx5e_tir *tir;
- void *tirc;
- int inlen;
- int i = 0;
- int err;
- u32 *in;
- int tt;
+ struct mlx5e_rss_params_hash *rss_hash = &priv->rx_res->rss_params.hash;
+ bool inner_ft_support = priv->channels.params.tunneled_offload_en;
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_rx_res *res = priv->rx_res;
+ enum mlx5e_traffic_types tt, max_tt;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_lro_param lro_param;
+ u32 indir_rqtn;
+ int err = 0;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ indir_rqtn = mlx5e_rqt_get_rqtn(&res->indir_rqt);
+
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(in, 0, inlen);
- tir = &priv->indir_tir[tt];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_indir_tir_ctx(priv, tt, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
+ mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
+ indir_rqtn, inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, &lro_param);
+ rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
+ mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);
+
+ err = mlx5e_tir_init(&res->rss[tt].indir_tir, builder, priv->mdev, true);
if (err) {
mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
- goto err_destroy_inner_tirs;
+ goto err_destroy_tirs;
}
+
+ mlx5e_tir_builder_clear(builder);
}
if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
goto out;
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
- memset(in, 0, inlen);
- tir = &priv->inner_indir_tir[i];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
+ indir_rqtn, inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, &lro_param);
+ rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
+ mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, true);
+
+ err = mlx5e_tir_init(&res->rss[tt].inner_indir_tir, builder, priv->mdev, true);
if (err) {
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
}
+
+ mlx5e_tir_builder_clear(builder);
}
+ goto out;
+
+err_destroy_inner_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);
+
+ tt = MLX5E_NUM_INDIR_TIRS;
+err_destroy_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_tir_destroy(&res->rss[tt].indir_tir);
+
out:
- kvfree(in);
+ mlx5e_tir_builder_free(builder);
- return 0;
+ return err;
+}
-err_destroy_inner_tirs:
- for (i--; i >= 0; i--)
- mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
+static int mlx5e_create_direct_tir(struct mlx5e_priv *priv, struct mlx5e_tir *tir,
+ struct mlx5e_tir_builder *builder, struct mlx5e_rqt *rqt)
+{
+ bool inner_ft_support = priv->channels.params.tunneled_offload_en;
+ struct mlx5e_lro_param lro_param;
+ int err = 0;
- for (tt--; tt >= 0; tt--)
- mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
- kvfree(in);
+ mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(rqt), inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, &lro_param);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(tir, builder, priv->mdev, true);
+ if (unlikely(err))
+ mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
+
+ mlx5e_tir_builder_clear(builder);
return err;
}
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
- struct mlx5e_tir *tir;
- void *tirc;
- int inlen;
+ struct mlx5e_rx_res *res = priv->rx_res;
+ struct mlx5e_tir_builder *builder;
int err = 0;
- u32 *in;
int ix;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- for (ix = 0; ix < n; ix++) {
- memset(in, 0, inlen);
- tir = &tirs[ix];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
- if (unlikely(err))
- goto err_destroy_ch_tirs;
+ for (ix = 0; ix < priv->max_nch; ix++) {
+ err = mlx5e_create_direct_tir(priv, &res->channels[ix].direct_tir,
+ builder, &res->channels[ix].direct_rqt);
+ if (err)
+ goto err_destroy_tirs;
}
goto out;
-err_destroy_ch_tirs:
- mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
- for (ix--; ix >= 0; ix--)
- mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
+err_destroy_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].direct_tir);
out:
- kvfree(in);
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
+
+static int mlx5e_create_xsk_tirs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rx_res *res = priv->rx_res;
+ struct mlx5e_tir_builder *builder;
+ int err;
+ int ix;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ for (ix = 0; ix < priv->max_nch; ix++) {
+ err = mlx5e_create_direct_tir(priv, &res->channels[ix].xsk_tir,
+ builder, &res->channels[ix].xsk_rqt);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ goto out;
+
+err_destroy_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
+
+out:
+ mlx5e_tir_builder_free(builder);
return err;
}
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
- int i;
+ struct mlx5e_rx_res *res = priv->rx_res;
+ enum mlx5e_traffic_types tt;
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5e_tir_destroy(&res->rss[tt].indir_tir);
/* Verify inner tirs resources allocated */
- if (!priv->inner_indir_tir[0].tirn)
+ if (!res->rss[0].inner_indir_tir.tirn)
return;
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);
}
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
- int i;
+ unsigned int ix;
- for (i = 0; i < n; i++)
- mlx5e_destroy_tir(priv->mdev, &tirs[i]);
+ for (ix = 0; ix < priv->max_nch; ix++)
+ mlx5e_tir_destroy(&priv->rx_res->channels[ix].direct_tir);
+}
+
+static void mlx5e_destroy_xsk_tirs(struct mlx5e_priv *priv)
+{
+ unsigned int ix;
+
+ for (ix = 0; ix < priv->max_nch; ix++)
+ mlx5e_tir_destroy(&priv->rx_res->channels[ix].xsk_tir);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
@@ -4591,7 +4430,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_nic_mtu,
- .ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_eth_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
@@ -4646,19 +4485,18 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
{
enum mlx5e_traffic_types tt;
- rss_params->hfunc = ETH_RSS_HASH_TOP;
- netdev_rss_key_fill(rss_params->toeplitz_hash_key,
- sizeof(rss_params->toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
+ rss_params->hash.hfunc = ETH_RSS_HASH_TOP;
+ netdev_rss_key_fill(rss_params->hash.toeplitz_hash_key,
+ sizeof(rss_params->hash.toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(rss_params->indir.table,
MLX5E_INDIR_RQT_SIZE, num_channels);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
rss_params->rx_hash_fields[tt] =
- tirc_default_config[tt].rx_hash_fields;
+ mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
- struct mlx5e_rss_params *rss_params = &priv->rss_params;
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
@@ -4718,10 +4556,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- /* RSS */
- mlx5e_build_rss_params(rss_params, params->num_channels);
- params->tunneled_offload_en =
- mlx5e_tunnel_inner_ft_supported(mdev);
+ params->tunneled_offload_en = mlx5e_tunnel_inner_ft_supported(mdev);
/* AF_XDP */
params->xsk = xsk;
@@ -4821,7 +4656,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ /* Tunneled LRO is not supported in the driver, and the same RQs are
+ * shared between inner and outer TIRs, so the driver can't disable LRO
+ * for inner TIRs while having it enabled for outer TIRs. Due to this,
+ * block LRO altogether if the firmware declares tunneled LRO support.
+ */
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
+ !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
+ !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
mlx5e_check_fragmented_striding_rq_cap(mdev))
netdev->vlan_features |= NETIF_F_LRO;
@@ -4948,7 +4790,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct devlink_port *dl_port;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4964,19 +4805,13 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- dl_port = mlx5e_devlink_get_dl_port(priv);
- if (dl_port->registered)
- mlx5e_health_create_reporters(priv);
-
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- if (dl_port->registered)
- mlx5e_health_destroy_reporters(priv);
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
@@ -4984,9 +4819,15 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
+ struct mlx5e_tir_builder *tir_builder;
int err;
+ priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
+ if (!priv->rx_res)
+ return -ENOMEM;
+
+ mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
+
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -4999,7 +4840,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
@@ -5007,23 +4848,31 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch);
+ err = mlx5e_create_xsk_rqts(priv);
if (unlikely(err))
goto err_destroy_direct_tirs;
- err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch);
+ err = mlx5e_create_xsk_tirs(priv);
if (unlikely(err))
goto err_destroy_xsk_rqts;
- err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
+ err = mlx5e_rqt_init_direct(&priv->rx_res->ptp.rqt, priv->mdev, false,
+ priv->drop_rq.rqn);
if (err)
goto err_destroy_xsk_tirs;
- err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1);
+ tir_builder = mlx5e_tir_builder_alloc(false);
+ if (!tir_builder) {
+ err = -ENOMEM;
+ goto err_destroy_ptp_rqt;
+ }
+ err = mlx5e_create_direct_tir(priv, &priv->rx_res->ptp.tir, tir_builder,
+ &priv->rx_res->ptp.rqt);
+ mlx5e_tir_builder_free(tir_builder);
if (err)
goto err_destroy_ptp_rqt;
@@ -5052,45 +4901,47 @@ err_tc_nic_cleanup:
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
err_destroy_ptp_direct_tir:
- mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
+ mlx5e_tir_destroy(&priv->rx_res->ptp.tir);
err_destroy_ptp_rqt:
- mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
+ mlx5e_rqt_destroy(&priv->rx_res->ptp.rqt);
err_destroy_xsk_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_xsk_tirs(priv);
err_destroy_xsk_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_xsk_rqts(priv);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
- mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_tir_destroy(&priv->rx_res->ptp.tir);
+ mlx5e_rqt_destroy(&priv->rx_res->ptp.rqt);
+ mlx5e_destroy_xsk_tirs(priv);
+ mlx5e_destroy_xsk_rqts(priv);
+ mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_destroy_direct_rqts(priv);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index bf94bcb6fa5d..2c54951c240d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -585,9 +585,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->tunneled_offload_en = false;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
-
- /* RSS */
- mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev,
@@ -650,6 +647,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_rx_res *res = priv->rx_res;
struct ttc_params ttc_params = {};
int tt, err;
@@ -657,7 +655,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
- ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params.any_tt_tirn = res->channels[0].direct_tir.tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
if (rep->vport != MLX5_VPORT_UPLINK)
@@ -665,7 +663,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+ ttc_params.indir_tirn[tt] = res->rss[tt].indir_tir.tirn;
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
@@ -760,9 +758,14 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
int err;
+ priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
+ if (!priv->rx_res)
+ return -ENOMEM;
+
+ mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
+
mlx5e_init_l2_addr(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -775,7 +778,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
@@ -783,7 +786,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
@@ -808,31 +811,33 @@ err_destroy_root_ft:
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_destroy_direct_rqts(priv);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
}
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d273758255c3..2ef02fea119a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -340,11 +340,11 @@ struct mlx5e_hairpin {
struct mlx5_core_dev *func_mdev;
struct mlx5e_priv *func_priv;
u32 tdn;
- u32 tirn;
+ struct mlx5e_tir direct_tir;
int num_channels;
struct mlx5e_rqt indir_rqt;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_ttc_table ttc;
};
@@ -482,126 +482,100 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
- u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
- void *tirc;
+ struct mlx5e_tir_builder *builder;
int err;
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
if (err)
- goto alloc_tdn_err;
-
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
- MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ goto out;
- err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
+ mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
+ err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
if (err)
goto create_tir_err;
- return 0;
+out:
+ mlx5e_tir_builder_free(builder);
+ return err;
create_tir_err:
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
-alloc_tdn_err:
- return err;
+
+ goto out;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
- mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5e_tir_destroy(&hp->direct_tir);
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
-static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
-{
- struct mlx5e_priv *priv = hp->func_priv;
- int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
- u32 *indirection_rqt, rqn;
-
- indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
- if (!indirection_rqt)
- return -ENOMEM;
-
- mlx5e_build_default_indir_rqt(indirection_rqt, sz,
- hp->num_channels);
-
- for (i = 0; i < sz; i++) {
- ix = i;
- if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, ilog2(sz));
- ix = indirection_rqt[ix];
- rqn = hp->pair->rqn[ix];
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
-
- kfree(indirection_rqt);
- return 0;
-}
-
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
- int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- u32 *in;
+ struct mlx5e_rss_params_indir *indir;
+ int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
+ if (!indir)
return -ENOMEM;
- rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
-
- err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
- if (err)
- goto out;
-
- err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
- if (!err)
- hp->indir_rqt.enabled = true;
+ mlx5e_build_default_indir_rqt(indir->table, MLX5E_INDIR_RQT_SIZE, hp->num_channels);
+ err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
+ priv->rx_res->rss_params.hash.hfunc, indir);
-out:
- kvfree(in);
+ kvfree(indir);
return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
- u32 in[MLX5_ST_SZ_DW(create_tir_in)];
- int tt, i, err;
- void *tirc;
+ struct mlx5e_rss_params_hash *rss_hash;
+ enum mlx5e_traffic_types tt, max_tt;
+ struct mlx5e_tir_builder *builder;
+ int err = 0;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ rss_hash = &priv->rx_res->rss_params.hash;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
+ struct mlx5e_rss_params_traffic_type rss_tt;
- memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ rss_tt = mlx5e_rss_get_default_tt_config(tt);
- MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
+ mlx5e_tir_builder_build_rqt(builder, hp->tdn,
+ mlx5e_rqt_get_rqtn(&hp->indir_rqt),
+ false);
+ mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);
- err = mlx5_core_create_tir(hp->func_mdev, in,
- &hp->indir_tirn[tt]);
+ err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
}
+
+ mlx5e_tir_builder_clear(builder);
}
- return 0;
-err_destroy_tirs:
- for (i = 0; i < tt; i++)
- mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+out:
+ mlx5e_tir_builder_free(builder);
return err;
+
+err_destroy_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_tir_destroy(&hp->indir_tir[tt]);
+
+ goto out;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
@@ -609,7 +583,7 @@ static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
int tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+ mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
@@ -620,10 +594,10 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->any_tt_tirn = hp->tirn;
+ ttc_params->any_tt_tirn = mlx5e_tir_get_tirn(&hp->direct_tir);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+ ttc_params->indir_tirn[tt] = mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
@@ -657,7 +631,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
err_create_ttc_table:
mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
- mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+ mlx5e_rqt_destroy(&hp->indir_rqt);
return err;
}
@@ -668,7 +642,7 @@ static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
mlx5e_destroy_ttc_table(priv, &hp->ttc);
mlx5e_hairpin_destroy_indirect_tirs(hp);
- mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+ mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
@@ -903,7 +877,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
- hp->tirn, hp->pair->rqn[0],
+ mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
@@ -912,7 +886,7 @@ attach_flow:
flow_flag_set(flow, HAIRPIN_RSS);
flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
- flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
}
flow->hpe = hpe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 7d7ed025db0d..a126cbc6f0d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -50,7 +50,7 @@ static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
.ndo_change_mtu = mlx5i_change_mtu,
- .ndo_do_ioctl = mlx5i_ioctl,
+ .ndo_eth_ioctl = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
@@ -331,32 +331,19 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
}
mlx5e_set_ttc_basic_params(priv, &ttc_params);
- mlx5e_set_inner_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
-
- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
- if (err) {
- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
- err);
- goto err_destroy_arfs_tables;
- }
-
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+ ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].indir_tir.tirn;
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
- goto err_destroy_inner_ttc_table;
+ goto err_destroy_arfs_tables;
}
return 0;
-err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -366,16 +353,20 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
int err;
+ priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
+ if (!priv->rx_res)
+ return -ENOMEM;
+
+ mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
+
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -388,15 +379,15 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
- err = mlx5e_create_indirect_tirs(priv, true);
+ err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
+ err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
@@ -407,31 +398,33 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return 0;
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5i_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_destroy_direct_rqts(priv);
+ mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
+ kvfree(priv->rx_res);
+ priv->rx_res = NULL;
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 18ee21b06a00..5308f23702bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -149,7 +149,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_get_stats64 = mlx5i_get_stats,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
- .ndo_do_ioctl = mlx5i_pkey_ioctl,
+ .ndo_eth_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index a0a059e0154f..d22219613719 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -199,7 +199,7 @@ static int mlxbf_gige_stop(struct net_device *netdev)
return 0;
}
-static int mlxbf_gige_do_ioctl(struct net_device *netdev,
+static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
@@ -253,7 +253,7 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_start_xmit = mlxbf_gige_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mlxbf_gige_do_ioctl,
+ .ndo_eth_ioctl = mlxbf_gige_eth_ioctl,
.ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88699e678544..081408e892d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1207,7 +1207,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
.ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
- .ndo_do_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
};
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c5ef9aa64efe..f5d0d392efbf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -335,14 +335,16 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
- struct net_device *brport_dev)
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
if (!bridge_port)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
bridge_port->lagged = mlxsw_sp_port->lagged;
@@ -359,12 +361,23 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
+ err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
return bridge_port;
+
+err_switchdev_offload:
+ list_del(&bridge_port->list);
+ kfree(bridge_port);
+ return ERR_PTR(err);
}
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
+ switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
list_del(&bridge_port->list);
WARN_ON(!list_empty(&bridge_port->vlans_list));
kfree(bridge_port);
@@ -390,9 +403,10 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
- bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
- if (!bridge_port) {
- err = -ENOMEM;
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
+ extack);
+ if (IS_ERR(bridge_port)) {
+ err = PTR_ERR(bridge_port);
goto err_bridge_port_create;
}
@@ -1569,7 +1583,6 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
{
long *flood_bitmap;
int num_of_ports;
- int alloc_size;
u16 mid_idx;
int err;
@@ -1579,18 +1592,17 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
return false;
num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
- flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
+ flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
if (!flood_bitmap)
return false;
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
+ bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
mid->mid = mid_idx;
err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
bridge_device->mrouter);
- kfree(flood_bitmap);
+ bitmap_free(flood_bitmap);
if (err)
return false;
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 831518466de2..3f69bb59ba49 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -689,7 +689,7 @@ static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
static const struct net_device_ops ks8851_netdev_ops = {
.ndo_open = ks8851_net_open,
.ndo_stop = ks8851_net_stop,
- .ndo_do_ioctl = ks8851_net_ioctl,
+ .ndo_eth_ioctl = ks8851_net_ioctl,
.ndo_start_xmit = ks8851_start_xmit,
.ndo_set_mac_address = ks8851_set_mac_address,
.ndo_set_rx_mode = ks8851_set_rx_mode,
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7945eb5e2fe8..a0ee155f9f51 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6738,7 +6738,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_features = netdev_set_features,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_rx_mode = netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netdev_netpoll,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dae10328c6cf..9e8561cdc32a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2655,7 +2655,7 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_open = lan743x_netdev_open,
.ndo_stop = lan743x_netdev_close,
.ndo_start_xmit = lan743x_netdev_xmit_frame,
- .ndo_do_ioctl = lan743x_netdev_ioctl,
+ .ndo_eth_ioctl = lan743x_netdev_ioctl,
.ndo_set_rx_mode = lan743x_netdev_set_multicast,
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 7bdbb2d09a14..d39ae2a6fb49 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -1,5 +1,6 @@
config SPARX5_SWITCH
tristate "Sparx5 switch driver"
+ depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index a72e3b3b596e..649ca609884a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -93,9 +93,12 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
}
static int sparx5_port_bridge_join(struct sparx5_port *port,
- struct net_device *bridge)
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
{
struct sparx5 *sparx5 = port->sparx5;
+ struct net_device *ndev = port->ndev;
+ int err;
if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
/* First bridged port */
@@ -109,12 +112,21 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
set_bit(port->portno, sparx5->bridge_mask);
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
/* Port enters in bridge mode therefore don't need to copy to CPU
* frames for multicast in case the bridge is not requesting them
*/
- __dev_mc_unsync(port->ndev, sparx5_mc_unsync);
+ __dev_mc_unsync(ndev, sparx5_mc_unsync);
return 0;
+
+err_switchdev_offload:
+ clear_bit(port->portno, sparx5->bridge_mask);
+ return err;
}
static void sparx5_port_bridge_leave(struct sparx5_port *port,
@@ -122,6 +134,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
{
struct sparx5 *sparx5 = port->sparx5;
+ switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
+
clear_bit(port->portno, sparx5->bridge_mask);
if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
sparx5->hw_bridge_dev = NULL;
@@ -139,11 +153,15 @@ static int sparx5_port_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct sparx5_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = sparx5_port_bridge_join(port, info->upper_dev);
+ err = sparx5_port_bridge_join(port, info->upper_dev,
+ extack);
else
sparx5_port_bridge_leave(port, info->upper_dev);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index e9d260d84bf3..de900ea70fd4 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -823,7 +823,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
- .ndo_do_ioctl = ocelot_ioctl,
+ .ndo_eth_ioctl = ocelot_ioctl,
.ndo_get_devlink_port = ocelot_get_devlink_port,
};
@@ -1154,38 +1154,19 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
struct net_device *bridge_dev,
struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_port_private *priv;
clock_t ageing_time;
u8 stp_state;
- int err;
-
- priv = container_of(ocelot_port, struct ocelot_port_private, port);
ocelot_inherit_brport_flags(ocelot, port, brport_dev);
stp_state = br_port_get_stp_state(brport_dev);
ocelot_bridge_stp_state_set(ocelot, port, stp_state);
- err = ocelot_port_vlan_filtering(ocelot, port,
- br_vlan_enabled(bridge_dev));
- if (err)
- return err;
-
ageing_time = br_get_ageing_time(bridge_dev);
ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
- err = br_mdb_replay(bridge_dev, brport_dev, priv, true,
- &ocelot_switchdev_blocking_nb, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
- err = br_vlan_replay(bridge_dev, brport_dev, priv, true,
- &ocelot_switchdev_blocking_nb, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
- return 0;
+ return ocelot_port_vlan_filtering(ocelot, port,
+ br_vlan_enabled(bridge_dev));
}
static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
@@ -1216,6 +1197,13 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
ocelot_port_bridge_join(ocelot, port, bridge);
+ err = switchdev_bridge_port_offload(brport_dev, dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack);
if (err)
goto err_switchdev_sync;
@@ -1223,10 +1211,24 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
return 0;
err_switchdev_sync:
+ switchdev_bridge_port_unoffload(brport_dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb);
+err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
return err;
}
+static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
+ struct net_device *brport_dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+
+ switchdev_bridge_port_unoffload(brport_dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb);
+}
+
static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge)
@@ -1279,6 +1281,18 @@ err_bridge_join:
return err;
}
+static void ocelot_netdevice_pre_lag_leave(struct net_device *dev,
+ struct net_device *bond)
+{
+ struct net_device *bridge_dev;
+
+ bridge_dev = netdev_master_upper_dev_get(bond);
+ if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
+ return;
+
+ ocelot_netdevice_pre_bridge_leave(dev, bond);
+}
+
static int ocelot_netdevice_lag_leave(struct net_device *dev,
struct net_device *bond)
{
@@ -1356,6 +1370,43 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
}
static int
+ocelot_netdevice_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ ocelot_netdevice_pre_bridge_leave(dev, brport_dev);
+
+ if (netif_is_lag_master(info->upper_dev) && !info->linking)
+ ocelot_netdevice_pre_lag_leave(dev, info->upper_dev);
+
+ return NOTIFY_DONE;
+}
+
+static int
+ocelot_netdevice_lag_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+ int err = NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct ocelot_port_private *priv = netdev_priv(lower);
+ struct ocelot_port *ocelot_port = &priv->port;
+
+ if (ocelot_port->bond != dev)
+ return NOTIFY_OK;
+
+ err = ocelot_netdevice_prechangeupper(dev, lower, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int
ocelot_netdevice_changelowerstate(struct net_device *dev,
struct netdev_lag_lower_state_info *info)
{
@@ -1382,6 +1433,17 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
+ case NETDEV_PRECHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (ocelot_netdevice_dev_check(dev))
+ return ocelot_netdevice_prechangeupper(dev, dev, info);
+
+ if (netif_is_lag_master(dev))
+ return ocelot_netdevice_lag_prechangeupper(dev, info);
+
+ break;
+ }
case NETDEV_CHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 51b4b25d15ad..bd9d026e609d 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -790,7 +790,7 @@ static const struct net_device_ops natsemi_netdev_ops = {
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = natsemi_change_mtu,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = ns_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 0b017d4f5c08..09c0e839cca5 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7625,7 +7625,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = s2io_ndo_set_multicast,
- .ndo_do_ioctl = s2io_ioctl,
+ .ndo_eth_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
.ndo_set_features = s2io_set_features,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 82eef4c72f01..20fb4ad29865 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3339,7 +3339,7 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = vxge_set_multicast,
- .ndo_do_ioctl = vxge_ioctl,
+ .ndo_eth_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
.ndo_fix_features = vxge_fix_features,
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index b82758d5beed..8844d1ac053a 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -23,6 +23,7 @@ config NFP
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select CRC32
+ select DIMLIB
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1cbe2c9f3959..2a432de11858 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
}
static bool
-nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
- struct flow_action_entry *act = flow->rule->action.entries;
- int num_act = flow->rule->action.num_entries;
+ struct flow_action_entry *act = rule->action.entries;
+ int num_act = rule->action.num_entries;
int act_idx;
/* Preparse action list for next mirred or redirect action */
@@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
@@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app,
/* Determine the tunnel type based on the egress netdev
* in the mirred action for tunnels without l4.
*/
- if (nfp_flower_tun_is_gre(flow, act_idx))
+ if (nfp_flower_tun_is_gre(rule, act_idx))
return NFP_FL_TUNNEL_GRE;
switch (tun->key.tp_dst) {
@@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts {
};
static int
-nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
+nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
int *a_len, struct nfp_flower_pedit_acts *set_act,
u32 *csum_updated)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
@@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
static int
nfp_fl_pedit(const struct flow_action_entry *act,
- struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+ char *nfp_action, int *a_len,
u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
struct netlink_ext_ack *extack)
{
@@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+ *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
if (*tun_type == NFP_FL_TUNNEL_NONE) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
@@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
/* Tunnel decap is handled by default so accept action. */
return 0;
case FLOW_ACTION_MANGLE:
- if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
+ if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
@@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
}
int nfp_flower_compile_action(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack)
@@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
- if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+ if (!flow_action_hw_stats_check(&rule->action, extack,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
@@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- flow_action_for_each(i, act, &flow->rule->action) {
- if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ flow_action_for_each(i, act, &rule->action) {
+ if (nfp_fl_check_mangle_start(&rule->action, i))
memset(&set_act, 0, sizeof(set_act));
- err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
+ err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
&set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
- if (nfp_fl_check_mangle_end(&flow->rule->action, i))
- nfp_fl_commit_mangle(flow,
+ if (nfp_fl_check_mangle_end(&rule->action, i))
+ nfp_fl_commit_mangle(rule,
&nfp_flow->action_data[act_len],
&act_len, &set_act, &csum_updated);
}
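
The action.c hunks above convert the action compiler to take a struct flow_rule * rather than the full struct flow_cls_offload *, so the same code can later be fed rules that were assembled in software (as the conntrack code below does with flow_rule_alloc()). A minimal sketch of how a classifier-path caller adapts — the function name here is invented; only flow_cls_offload_flow_rule() and the new nfp_flower_compile_action() signature are taken from the patch:

static int example_compile_from_tc(struct nfp_app *app,
				   struct net_device *netdev,
				   struct flow_cls_offload *flow,
				   struct nfp_fl_payload *nfp_flow,
				   struct netlink_ext_ack *extack)
{
	/* Extract the rule once; the compile helpers now consume it directly. */
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	return nfp_flower_compile_action(app, rule, netdev, nfp_flow, extack);
}
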
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 062bb2db68bf..bfd7d1c35076 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Corigine, Inc. */
#include "conntrack.h"
+#include "../nfp_port.h"
const struct rhashtable_params nfp_tc_ct_merge_params = {
.head_offset = offsetof(struct nfp_fl_ct_tc_merge,
@@ -407,15 +408,491 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
return -EINVAL;
}
+static int
+nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
+{
+ int key_size;
+
+ /* This field must always be present */
+ key_size = sizeof(struct nfp_flower_meta_tci);
+ map[FLOW_PAY_META_TCI] = 0;
+
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
+ map[FLOW_PAY_EXT_META] = key_size;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
+ map[FLOW_PAY_INPORT] = key_size;
+ key_size += sizeof(struct nfp_flower_in_port);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
+ map[FLOW_PAY_MAC_MPLS] = key_size;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
+ map[FLOW_PAY_L4] = key_size;
+ key_size += sizeof(struct nfp_flower_tp_ports);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
+ map[FLOW_PAY_IPV4] = key_size;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
+ map[FLOW_PAY_IPV6] = key_size;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ map[FLOW_PAY_GRE] = key_size;
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+ key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
+ else
+ key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
+ if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
+ (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
+ map[FLOW_PAY_UDP_TUN] = key_size;
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+ key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ else
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ map[FLOW_PAY_GENEVE_OPT] = key_size;
+ key_size += sizeof(struct nfp_flower_geneve_options);
+ }
+
+ return key_size;
+}
+
+static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
+ struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct nfp_fl_payload *flow_pay)
+{
+ struct flow_action_entry *a_in;
+ int i, j, num_actions, id;
+ struct flow_rule *a_rule;
+ int err = 0, offset = 0;
+
+ num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
+ rules[CT_TYPE_NFT]->action.num_entries +
+ rules[CT_TYPE_POST_CT]->action.num_entries;
+
+ a_rule = flow_rule_alloc(num_actions);
+ if (!a_rule)
+ return -ENOMEM;
+
+ /* Actions need a BASIC dissector. */
+ a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+
+ /* Copy actions */
+ for (j = 0; j < _CT_TYPE_MAX; j++) {
+ if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ /* ip_proto is the only field needed later in compile_action, where it
+ * is used to set the correct checksum flags. It doesn't really matter
+ * which input rule's ip_proto field we take, as the earlier merge checks
+ * have already made sure that they don't conflict. We do not know which
+ * of the subflows has the ip_proto filled in, so we need to iterate
+ * through the subflows and assign the proper subflow to a_rule.
+ */
+ flow_rule_match_basic(rules[j], &match);
+ if (match.mask->ip_proto)
+ a_rule->match = rules[j]->match;
+ }
+
+ for (i = 0; i < rules[j]->action.num_entries; i++) {
+ a_in = &rules[j]->action.entries[i];
+ id = a_in->id;
+
+ /* Ignore CT-related actions as these have already been
+ * taken care of by previous checks, and we do not send
+ * any CT actions to the firmware.
+ */
+ switch (id) {
+ case FLOW_ACTION_CT:
+ case FLOW_ACTION_GOTO:
+ case FLOW_ACTION_CT_METADATA:
+ continue;
+ default:
+ memcpy(&a_rule->action.entries[offset++],
+ a_in, sizeof(struct flow_action_entry));
+ break;
+ }
+ }
+ }
+
+ /* Some actions may have been ignored, so update the num_entries field */
+ a_rule->action.num_entries = offset;
+ err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
+ kfree(a_rule);
+
+ return err;
+}
+
static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
- return 0;
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
+ struct nfp_fl_key_ls key_layer, tmp_layer;
+ struct nfp_flower_priv *priv = zt->priv;
+ u16 key_map[_FLOW_PAY_LAYERS_MAX];
+ struct nfp_fl_payload *flow_pay;
+
+ struct flow_rule *rules[_CT_TYPE_MAX];
+ u8 *key, *msk, *kdata, *mdata;
+ struct nfp_port *port = NULL;
+ struct net_device *netdev;
+ bool qinq_sup;
+ u32 port_id;
+ u16 offset;
+ int i, err;
+
+ netdev = m_entry->netdev;
+ qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
+ rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
+ rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
+ rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
+
+ memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
+ memset(&key_map, 0, sizeof(key_map));
+
+ /* Calculate the resultant key layer and size for offload */
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ err = nfp_flower_calculate_key_layers(priv->app,
+ m_entry->netdev,
+ &tmp_layer, rules[i],
+ &tun_type, NULL);
+ if (err)
+ return err;
+
+ key_layer.key_layer |= tmp_layer.key_layer;
+ key_layer.key_layer_two |= tmp_layer.key_layer_two;
+ }
+ key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);
+
+ flow_pay = nfp_flower_allocate_new(&key_layer);
+ if (!flow_pay)
+ return -ENOMEM;
+
+ memset(flow_pay->unmasked_data, 0, key_layer.key_size);
+ memset(flow_pay->mask_data, 0, key_layer.key_size);
+
+ kdata = flow_pay->unmasked_data;
+ mdata = flow_pay->mask_data;
+
+ offset = key_map[FLOW_PAY_META_TCI];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
+ (struct nfp_flower_meta_tci *)msk,
+ key_layer.key_layer);
+
+ if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_EXT_META];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
+ key_layer.key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_layer.key_layer_two);
+ }
+
+ /* Use in_port from the -trk rule. The tc merge checks should already
+ * have verified that the ingress netdevs are the same.
+ */
+ port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
+ offset = key_map[FLOW_PAY_INPORT];
+ key = kdata + offset;
+ msk = mdata + offset;
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
+ port_id, false, tun_type, NULL);
+ if (err)
+ goto ct_offload_err;
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ port_id, true, tun_type, NULL);
+ if (err)
+ goto ct_offload_err;
+
+ /* The following part works on the assumption that previous checks have
+ * already filtered out flows that have different values for the different
+ * layers. Here we iterate through all three rules and merge their respective
+ * masked values (cared bits); the basic method, sketched after this hunk, is:
+ * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
+ * final_mask = r1_mask | r2_mask | r3_mask
+ * If none of the rules contains a match, that is also fine; it simply means
+ * that the layer is not present.
+ */
+ if (!qinq_sup) {
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ offset = key_map[FLOW_PAY_META_TCI];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
+ (struct nfp_flower_meta_tci *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_MAC_MPLS];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
+ (struct nfp_flower_mac_mpls *)msk,
+ rules[i]);
+ err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
+ (struct nfp_flower_mac_mpls *)msk,
+ rules[i], NULL);
+ if (err)
+ goto ct_offload_err;
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_IPV4];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
+ (struct nfp_flower_ipv4 *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_IPV6];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
+ (struct nfp_flower_ipv6 *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_L4];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
+ (struct nfp_flower_tp_ports *)msk,
+ rules[i]);
+ }
+ }
+
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ offset = key_map[FLOW_PAY_GRE];
+ key = kdata + offset;
+ msk = mdata + offset;
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6_gre_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
+ dst = &gre_match->ipv6.dst;
+
+ entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+ if (!entry) {
+ err = -ENOMEM;
+ goto ct_offload_err;
+ }
+
+ flow_pay->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4_gre_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ flow_pay->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(priv->app, dst);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
+ if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ offset = key_map[FLOW_PAY_UDP_TUN];
+ key = kdata + offset;
+ msk = mdata + offset;
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6_udp_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
+ dst = &udp_match->ipv6.dst;
+
+ entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+ if (!entry) {
+ err = -ENOMEM;
+ goto ct_offload_err;
+ }
+
+ flow_pay->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4_udp_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ flow_pay->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(priv->app, dst);
+ }
+
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ offset = key_map[FLOW_PAY_GENEVE_OPT];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++)
+ nfp_flower_compile_geneve_opt(key, msk, rules[i]);
+ }
+ }
+
+ /* Merge actions into flow_pay */
+ err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
+ if (err)
+ goto ct_offload_err;
+
+ /* Use the pointer address as the cookie, but set the last bit to 1.
+ * This prevents the 'is_merge_flow' check from detecting this as an
+ * already merged flow. This works since address alignment means that
+ * the last bit of a pointer address will always be 0.
+ */
+ flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
+ err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
+ flow_pay, netdev, NULL);
+ if (err)
+ goto ct_offload_err;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto ct_release_offload_meta_err;
+
+ err = nfp_flower_xmit_flow(priv->app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (err)
+ goto ct_remove_rhash_err;
+
+ m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
+ m_entry->flow_pay = flow_pay;
+
+ if (port)
+ port->tc_offload_cnt++;
+
+ return err;
+
+ct_remove_rhash_err:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &flow_pay->fl_node,
+ nfp_flower_table_params));
+ct_release_offload_meta_err:
+ nfp_modify_flow_metadata(priv->app, flow_pay);
+ct_offload_err:
+ if (flow_pay->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ kfree(flow_pay);
+ return err;
}
static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
struct net_device *netdev)
{
- return 0;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flow_pay;
+ struct nfp_port *port = NULL;
+ int err = 0;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
+ if (!flow_pay)
+ return -ENOENT;
+
+ err = nfp_modify_flow_metadata(app, flow_pay);
+ if (err)
+ goto err_free_merge_flow;
+
+ if (flow_pay->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);
+
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
+
+ if (!flow_pay->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
+
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+
+err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, flow_pay);
+ if (port)
+ port->tc_offload_cnt--;
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &flow_pay->fl_node,
+ nfp_flower_table_params));
+ kfree_rcu(flow_pay, rcu);
+ return err;
}
static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
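
As referenced in the comment inside nfp_fl_ct_add_offload(), the per-layer merge of the pre_ct, nft and post_ct rules reduces to an OR-accumulation of the cared bits. A self-contained sketch with an invented one-byte field (struct and function names are illustrative, not driver code):

struct example_match {
	u8 key;
	u8 mask;
};

static void example_merge(struct example_match *out,
			  const struct example_match in[3])
{
	int i;

	out->key = 0;
	out->mask = 0;
	for (i = 0; i < 3; i++) {
		out->key |= in[i].key & in[i].mask;	/* only the cared bits */
		out->mask |= in[i].mask;		/* union of the masks */
	}
}

A layer that none of the three rules matches on simply contributes all-zero masks and drops out of the result.
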
@@ -1048,6 +1525,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
return 0;
}
+static void
+nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
+ enum ct_entry_type type, u64 *m_pkts,
+ u64 *m_bytes, u64 *m_used)
+{
+ struct nfp_flower_priv *priv = nft_merge->zt->priv;
+ struct nfp_fl_payload *nfp_flow;
+ u32 ctx_id;
+
+ nfp_flow = nft_merge->flow_pay;
+ if (!nfp_flow)
+ return;
+
+ ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+ *m_pkts += priv->stats[ctx_id].pkts;
+ *m_bytes += priv->stats[ctx_id].bytes;
+ *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
+
+ /* If the request is for a sub_flow which is part of a tunnel merged
+ * flow, then update stats from the tunnel merged flows first.
+ */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(priv->app, nfp_flow);
+
+ if (type != CT_TYPE_NFT) {
+ /* Update nft cached stats */
+ flow_stats_update(&nft_merge->nft_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ } else {
+ /* Update pre_ct cached stats */
+ flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ /* Update post_ct cached stats */
+ flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ /* Reset stats from the nfp */
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+}
+
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+ struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
+ struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
+
+ u64 pkts = 0, bytes = 0, used = 0;
+ u64 m_pkts, m_bytes, m_used;
+
+ spin_lock_bh(&ct_entry->zt->priv->stats_lock);
+
+ if (ct_entry->type == CT_TYPE_PRE_CT) {
+ /* Iterate tc_merge entries associated with this flow */
+ list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+ pre_ct_list) {
+ m_pkts = 0;
+ m_bytes = 0;
+ m_used = 0;
+ /* Iterate nft_merge entries associated with this tc_merge flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+ tc_merge_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
+ &m_pkts, &m_bytes, &m_used);
+ }
+ pkts += m_pkts;
+ bytes += m_bytes;
+ used = max_t(u64, used, m_used);
+ /* Update post_ct partner */
+ flow_stats_update(&tc_merge->post_ct_parent->stats,
+ m_bytes, m_pkts, 0, m_used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ } else if (ct_entry->type == CT_TYPE_POST_CT) {
+ /* Iterate tc_merge entries associated with this flow */
+ list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+ post_ct_list) {
+ m_pkts = 0;
+ m_bytes = 0;
+ m_used = 0;
+ /* Iterate nft_merge entries associated with this tc_merge flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+ tc_merge_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
+ &m_pkts, &m_bytes, &m_used);
+ }
+ pkts += m_pkts;
+ bytes += m_bytes;
+ used = max_t(u64, used, m_used);
+ /* Update pre_ct partner */
+ flow_stats_update(&tc_merge->pre_ct_parent->stats,
+ m_bytes, m_pkts, 0, m_used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ } else {
+ /* Iterate nft_merge entries associated with this nft flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
+ nft_flow_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
+ &pkts, &bytes, &used);
+ }
+ }
+
+ /* Add stats from this request to stats potentially cached by
+ * previous requests.
+ */
+ flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ /* Finally update the flow stats from the original stats request */
+ flow_stats_update(&flow->stats, ct_entry->stats.bytes,
+ ct_entry->stats.pkts, 0,
+ ct_entry->stats.lastused,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ /* Stats have been synced to the original flow, so the cache
+ * can now be cleared.
+ */
+ ct_entry->stats.pkts = 0;
+ ct_entry->stats.bytes = 0;
+ spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
+
+ return 0;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1080,7 +1690,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
nfp_ct_map_params);
return nfp_fl_ct_del_flow(ct_map_ent);
case FLOW_CLS_STATS:
- return 0;
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent)
+ return nfp_fl_ct_stats(flow, ct_map_ent);
+ break;
default:
break;
}
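
The stats handling added above follows one pattern throughout: packet and byte counters from the hardware context are summed into the caller's totals, 'used' keeps the most recent timestamp, and the per-context cache is zeroed once the totals have been handed to TC via flow_stats_update(). A condensed sketch of that roll-up (names are invented for illustration):

struct example_stats {
	u64 pkts;
	u64 bytes;
	u64 used;
};

static void example_accumulate(struct example_stats *total,
			       struct example_stats *hw)
{
	total->pkts += hw->pkts;
	total->bytes += hw->bytes;
	total->used = max_t(u64, total->used, hw->used);

	/* clear the hardware-side cache so counts are not reported twice */
	hw->pkts = 0;
	hw->bytes = 0;
}
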
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index 170b6cdb8cd0..beb6cceff9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -83,6 +83,24 @@ enum ct_entry_type {
CT_TYPE_PRE_CT,
CT_TYPE_NFT,
CT_TYPE_POST_CT,
+ _CT_TYPE_MAX,
+};
+
+enum nfp_nfp_layer_name {
+ FLOW_PAY_META_TCI = 0,
+ FLOW_PAY_INPORT,
+ FLOW_PAY_EXT_META,
+ FLOW_PAY_MAC_MPLS,
+ FLOW_PAY_L4,
+ FLOW_PAY_IPV4,
+ FLOW_PAY_IPV6,
+ FLOW_PAY_CT,
+ FLOW_PAY_GRE,
+ FLOW_PAY_QINQ,
+ FLOW_PAY_UDP_TUN,
+ FLOW_PAY_GENEVE_OPT,
+
+ _FLOW_PAY_LAYERS_MAX
};
/**
@@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
*/
int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
void *cb_priv);
+
+/**
+ * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows
+ * @flow: TC flower classifier offload structure.
+ * @ct_map_ent: ct map entry for the flow whose stats are requested
+ */
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+ struct nfp_fl_ct_map_entry *ct_map_ent);
#endif
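
The FLOW_PAY_* values above are not wire values; they index the key_map[] that nfp_fl_calc_key_layers_sz() fills with the byte offset of every present layer inside the flat unmasked/mask buffers. A condensed restatement of that layout logic (only two layers shown, function name invented):

static int example_layout(struct nfp_fl_key_ls in, u16 *map)
{
	/* the meta/TCI block is always present and always first */
	int key_size = sizeof(struct nfp_flower_meta_tci);

	map[FLOW_PAY_META_TCI] = 0;

	if (in.key_layer & NFP_FLOWER_LAYER_IPV4) {
		map[FLOW_PAY_IPV4] = key_size;	/* offset of the IPv4 block */
		key_size += sizeof(struct nfp_flower_ipv4);
	}
	/* ...remaining layers are appended in the same way... */

	return key_size;	/* total size of the flat key */
}

Later code then derives each layer pointer as key = kdata + key_map[FLOW_PAY_IPV4] and casts it to the matching layer struct.
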
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 0fbd682ccf72..917c450a7aad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk, u8 key_type);
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext);
+int
+nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
+ bool mask_version, enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule);
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+ struct nfp_flower_tp_ports *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+ struct nfp_flower_ipv4_udp_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule);
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack);
-int nfp_compile_flow_metadata(struct nfp_app *app,
- struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack);
@@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
+
+struct nfp_fl_payload *
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer);
+int nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_fl_key_ls *ret_key_ls,
+ struct flow_rule *flow,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow);
+int
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype);
+void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 255a4dff6288..9d86eea4dc16 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -7,51 +7,68 @@
#include "cmsg.h"
#include "main.h"
-static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
- struct nfp_flower_meta_tci *msk,
- struct flow_rule *rule, u8 key_type, bool qinq_sup)
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk, u8 key_type)
{
- u16 tmp_tci;
-
- memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
- memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
-
/* Populate the metadata frame. */
ext->nfp_flow_key_layer = key_type;
ext->mask_id = ~0;
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
+}
- if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule)
+{
+ u16 msk_tci, key_tci;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.key->vlan_id);
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.mask->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.mask->vlan_id);
- msk->tci = cpu_to_be16(tmp_tci);
+
+ ext->tci |= cpu_to_be16((key_tci & msk_tci));
+ msk->tci |= cpu_to_be16(msk_tci);
}
}
static void
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule, u8 key_type, bool qinq_sup)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
+
+ nfp_flower_compile_meta(ext, msk, key_type);
+
+ if (!qinq_sup)
+ nfp_flower_compile_tci(ext, msk, rule);
+}
+
+void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
-static int
+int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
bool mask_version, enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
@@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
-static int
+void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
- struct netlink_ext_ack *extack)
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
- memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ int i;
flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
- ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
- ether_addr_copy(ext->mac_src, &match.key->src[0]);
- ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
- ether_addr_copy(msk->mac_src, &match.mask->src[0]);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ext->mac_dst[i] |= match.key->dst[i] &
+ match.mask->dst[i];
+ msk->mac_dst[i] |= match.mask->dst[i];
+ ext->mac_src[i] |= match.key->src[i] &
+ match.mask->src[i];
+ msk->mac_src[i] |= match.mask->src[i];
+ }
}
+}
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
- u32 t_mpls;
+ u32 key_mpls, msk_mpls;
flow_rule_match_mpls(rule, &match);
@@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
return -EOPNOTSUPP;
}
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
- match.key->ls[0].mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
- match.key->ls[0].mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
- match.key->ls[0].mpls_bos) |
- NFP_FLOWER_MASK_MPLS_Q;
- ext->mpls_lse = cpu_to_be32(t_mpls);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
- match.mask->ls[0].mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
- match.mask->ls[0].mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
- match.mask->ls[0].mpls_bos) |
- NFP_FLOWER_MASK_MPLS_Q;
- msk->mpls_lse = cpu_to_be32(t_mpls);
+ key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.key->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.key->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[0].mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[0].mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
+ msk->mpls_lse |= cpu_to_be32(msk_mpls);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
* bit, which indicates an mpls ether type but without any
@@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
flow_rule_match_basic(rule, &match);
if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
- ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
- msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
return 0;
}
-static void
+static int
+nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+ memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
+
+ nfp_flower_compile_mac(ext, msk, rule);
+
+ return nfp_flower_compile_mpls(ext, msk, rule, extack);
+}
+
+void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
- memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- ext->port_src = match.key->src;
- ext->port_dst = match.key->dst;
- msk->port_src = match.mask->src;
- msk->port_dst = match.mask->dst;
+ ext->port_src |= match.key->src & match.mask->src;
+ ext->port_dst |= match.key->dst & match.mask->dst;
+ msk->port_src |= match.mask->src;
+ msk->port_dst |= match.mask->dst;
}
}
@@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
- ext->proto = match.key->ip_proto;
- msk->proto = match.mask->ip_proto;
+ ext->proto |= match.key->ip_proto & match.mask->ip_proto;
+ msk->proto |= match.mask->ip_proto;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
+ ext->tos |= match.key->tos & match.mask->tos;
+ ext->ttl |= match.key->ttl & match.mask->ttl;
+ msk->tos |= match.mask->tos;
+ msk->ttl |= match.mask->ttl;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
@@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
-nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
- struct nfp_flower_vlan *frame,
- bool outer_vlan)
+nfp_flower_fill_vlan(struct flow_match_vlan *match,
+ struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk, bool outer_vlan)
{
- u16 tci;
-
- tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- key->vlan_id);
+ struct flow_dissector_key_vlan *mask = match->mask;
+ struct flow_dissector_key_vlan *key = match->key;
+ u16 msk_tci, key_tci;
+
+ key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ key->vlan_id);
+ msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ mask->vlan_id);
if (outer_vlan) {
- frame->outer_tci = cpu_to_be16(tci);
- frame->outer_tpid = key->vlan_tpid;
+ ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
+ ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
+ msk->outer_tci |= cpu_to_be16(msk_tci);
+ msk->outer_tpid |= mask->vlan_tpid;
} else {
- frame->inner_tci = cpu_to_be16(tci);
- frame->inner_tpid = key->vlan_tpid;
+ ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
+ ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
+ msk->inner_tci |= cpu_to_be16(msk_tci);
+ msk->inner_tpid |= mask->vlan_tpid;
}
}
-static void
+void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
struct nfp_flower_vlan *msk,
struct flow_rule *rule)
{
struct flow_match_vlan match;
- memset(ext, 0, sizeof(struct nfp_flower_vlan));
- memset(msk, 0, sizeof(struct nfp_flower_vlan));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
flow_rule_match_vlan(rule, &match);
- nfp_flower_fill_vlan(match.key, ext, true);
- nfp_flower_fill_vlan(match.mask, msk, true);
+ nfp_flower_fill_vlan(&match, ext, msk, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
flow_rule_match_cvlan(rule, &match);
- nfp_flower_fill_vlan(match.key, ext, false);
- nfp_flower_fill_vlan(match.mask, msk, false);
+ nfp_flower_fill_vlan(&match, ext, msk, false);
}
}
-static void
+void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_match_ipv4_addrs match;
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
flow_rule_match_ipv4_addrs(rule, &match);
- ext->ipv4_src = match.key->src;
- ext->ipv4_dst = match.key->dst;
- msk->ipv4_src = match.mask->src;
- msk->ipv4_dst = match.mask->dst;
+ ext->ipv4_src |= match.key->src & match.mask->src;
+ ext->ipv4_dst |= match.key->dst & match.mask->dst;
+ msk->ipv4_src |= match.mask->src;
+ msk->ipv4_dst |= match.mask->dst;
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
-static void
+void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ int i;
flow_rule_match_ipv6_addrs(rule, &match);
- ext->ipv6_src = match.key->src;
- ext->ipv6_dst = match.key->dst;
- msk->ipv6_src = match.mask->src;
- msk->ipv6_dst = match.mask->dst;
+ for (i = 0; i < sizeof(ext->ipv6_src); i++) {
+ ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
+ msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+ }
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
-static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
+ int i;
- flow_rule_match_enc_opts(rule, &match);
- memcpy(ext, match.key->data, match.key->len);
- memcpy(msk, match.mask->data, match.mask->len);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ flow_rule_match_enc_opts(rule, &match);
- return 0;
+ for (i = 0; i < match.mask->len; i++) {
+ ext[i] |= match.key->data[i] & match.mask->data[i];
+ msk[i] |= match.mask->data[i];
+ }
+ }
}
static void
@@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
- ext->src = match.key->src;
- ext->dst = match.key->dst;
- msk->src = match.mask->src;
- msk->dst = match.mask->dst;
+ ext->src |= match.key->src & match.mask->src;
+ ext->dst |= match.key->dst & match.mask->dst;
+ msk->src |= match.mask->src;
+ msk->dst |= match.mask->dst;
}
}
@@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ int i;
flow_rule_match_enc_ipv6_addrs(rule, &match);
- ext->src = match.key->src;
- ext->dst = match.key->dst;
- msk->src = match.mask->src;
- msk->dst = match.mask->dst;
+ for (i = 0; i < sizeof(ext->src); i++) {
+ ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
+ msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+ }
}
}
@@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
+ ext->tos |= match.key->tos & match.mask->tos;
+ ext->ttl |= match.key->ttl & match.mask->ttl;
+ msk->tos |= match.mask->tos;
+ msk->ttl |= match.mask->ttl;
}
}
@@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
u32 vni;
flow_rule_match_enc_keyid(rule, &match);
- vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- *key = cpu_to_be32(vni);
+ vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
+ NFP_FL_TUN_VNI_OFFSET;
+ *key |= cpu_to_be32(vni);
vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- *key_msk = cpu_to_be32(vni);
+ *key_msk |= cpu_to_be32(vni);
}
}
@@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- *key = match.key->keyid;
- *key_msk = match.mask->keyid;
+ *key |= match.key->keyid & match.mask->keyid;
+ *key_msk |= match.mask->keyid;
*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
}
-static void
+void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
struct nfp_flower_ipv4_gre_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
@@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
&ext->tun_flags, &msk->tun_flags, rule);
}
-static void
+void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
-
nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
-static void
+void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
struct nfp_flower_ipv6_udp_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
-
nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
-static void
+void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
struct nfp_flower_ipv6_gre_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
-
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
@@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_flower_priv *priv = app->priv;
bool qinq_sup;
u32 port_id;
@@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- (struct nfp_flower_mac_mpls *)msk,
- rule, extack);
+ err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
+ (struct nfp_flower_mac_mpls *)msk,
+ rule, extack);
if (err)
return err;
@@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, rule);
- if (err)
- return err;
+ nfp_flower_compile_geneve_opt(ext, msk, rule);
}
}
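
The match.c conversion from plain assignment to OR-accumulation is what makes the conntrack merge possible: the ct code calls each compile helper once per sub-rule into the same ext/msk buffers, so an assignment from the last rule would discard bits contributed by the earlier ones. A sketch of the idiom with an invented single-byte field:

static void example_compile_field(u8 *ext, u8 *msk, u8 key, u8 mask)
{
	*ext |= key & mask;	/* accumulate cared key bits */
	*msk |= mask;		/* accumulate the mask itself */
}

/* The caller zeroes the buffers once, then feeds every sub-rule through
 * the same helper:
 *
 *	for (i = 0; i < _CT_TYPE_MAX; i++)
 *		example_compile_field(&ext, &msk, key[i], mask[i]);
 */
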
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 621113650a9b..2af9faee96c5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
return true;
}
-int nfp_compile_flow_metadata(struct nfp_app *app,
- struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack)
@@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
}
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
- nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
nfp_flow->ingress_dev = netdev;
ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
@@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
+ check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
if (nfp_release_stats_entry(app, stats_cxt)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2406d33356ad..556c3495211d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -41,6 +41,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
+ BIT(FLOW_DISSECTOR_KEY_CT) | \
+ BIT(FLOW_DISSECTOR_KEY_META) | \
BIT(FLOW_DISSECTOR_KEY_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
@@ -89,7 +91,7 @@ struct nfp_flower_merge_check {
};
};
-static int
+int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
{
@@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
-static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
-static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
@@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
return 0;
}
-static int
+int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
enum nfp_flower_tun_type *tun_type,
struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
@@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
return -EOPNOTSUPP;
}
- } else if (nfp_flower_check_higher_than_mac(flow)) {
+ } else if (nfp_flower_check_higher_than_mac(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
return -EOPNOTSUPP;
}
@@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
- nfp_flower_check_higher_than_l3(flow)) {
+ nfp_flower_check_higher_than_l3(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
return -EOPNOTSUPP;
}
@@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return 0;
}
-static struct nfp_fl_payload *
+struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
- struct flow_cls_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
- struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
struct nfp_merge_info *merge_info;
@@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
ASSERT_RTNL();
- extack = merge_tc_off.common.extack;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
if (err)
goto err_unlink_sub_flow1;
- merge_tc_off.cookie = merge_flow->tc_flower_cookie;
- err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
- merge_flow->ingress_dev, extack);
+ err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
+ merge_flow->ingress_dev, NULL);
if (err)
goto err_unlink_sub_flow2;
@@ -1305,6 +1298,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct netlink_ext_ack *extack = NULL;
@@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
&tun_type, extack);
if (err)
goto err_free_key_ls;
@@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
+ err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
@@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
}
- err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
+ err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
@@ -1476,7 +1470,7 @@ err_free_links:
kfree_rcu(merge_flow, rcu);
}
-static void
+void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
@@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
}
}
-static void
+void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
@@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
+ /* Check ct_map table first */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent)
+ return nfp_fl_ct_stats(flow, ct_map_ent);
+
extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df5b748be068..df203738511b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/dim.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -360,6 +361,9 @@ struct nfp_net_rx_ring {
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
* @irq_entry: MSI-X table entry (use for talking to the device)
+ * @event_ctr: Number of interrupts
+ * @rx_dim: Dynamic interrupt moderation structure for RX
+ * @tx_dim: Dynamic interrupt moderation structure for TX
* @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets
* @rx_bytes: Number of received bytes
@@ -410,6 +414,10 @@ struct nfp_net_r_vector {
u16 irq_entry;
+ u16 event_ctr;
+ struct dim rx_dim;
+ struct dim tx_dim;
+
struct u64_stats_sync rx_sync;
u64 rx_pkts;
u64 rx_bytes;
@@ -571,6 +579,8 @@ struct nfp_net_dp {
* mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
+ * @rx_coalesce_adapt_on: Is RX interrupt moderation adaptive?
+ * @tx_coalesce_adapt_on: Is TX interrupt moderation adaptive?
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
* @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter
@@ -654,6 +664,8 @@ struct nfp_net {
struct semaphore bar_lock;
+ bool rx_coalesce_adapt_on;
+ bool tx_coalesce_adapt_on;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs;
@@ -919,6 +931,14 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
return netdev->netdev_ops == &nfp_net_netdev_ops;
}
+static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
+{
+ if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
+ return -EINVAL;
+
+ return 0;
+}
+
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
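
The 16-bit limit enforced by nfp_net_coalesce_para_check() exists because the two moderation parameters share a single 32-bit register write, as the dim work handlers further down do with (moder.pkts << 16) | (factor * moder.usec). A sketch of the packing under that constraint (function name invented):

static int example_pack_irq_mod(u32 usecs, u32 pkts, u32 *value)
{
	/* same bound as nfp_net_coalesce_para_check() */
	if (usecs >= ((1 << 16) - 1) || pkts >= ((1 << 16) - 1))
		return -EINVAL;

	*value = (pkts << 16) | usecs;	/* packet count high, usecs low */
	return 0;
}
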
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 5dfa4799c34f..15078f9dc9f1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -474,6 +474,12 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
struct nfp_net_r_vector *r_vec = data;
+ /* Currently we cannot tell whether it is an rx or tx interrupt;
+ * since dim does not need an accurate event_ctr for its calculation,
+ * we just use this one counter for both rx and tx dim.
+ */
+ r_vec->event_ctr++;
+
napi_schedule_irqoff(&r_vec->napi);
/* The FW auto-masks any interrupt, either via the MASK bit in
@@ -1697,7 +1703,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_RESYNC_INFO:
if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
pkt_len))
- return NULL;
+ return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
default:
@@ -2061,6 +2067,36 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, pkts_polled))
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ if (r_vec->nfp_net->rx_coalesce_adapt_on) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
return pkts_polled;
}
@@ -2873,6 +2909,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
*/
static void nfp_net_close_stack(struct nfp_net *nn)
{
+ struct nfp_net_r_vector *r_vec;
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
@@ -2880,8 +2917,16 @@ static void nfp_net_close_stack(struct nfp_net *nn)
nn->link_up = false;
for (r = 0; r < nn->dp.num_r_vecs; r++) {
- disable_irq(nn->r_vecs[r].irq_vector);
- napi_disable(&nn->r_vecs[r].napi);
+ r_vec = &nn->r_vecs[r];
+
+ disable_irq(r_vec->irq_vector);
+ napi_disable(&r_vec->napi);
+
+ if (r_vec->rx_ring)
+ cancel_work_sync(&r_vec->rx_dim.work);
+
+ if (r_vec->tx_ring)
+ cancel_work_sync(&r_vec->tx_dim.work);
}
netif_tx_disable(nn->dp.netdev);
@@ -2948,17 +2993,92 @@ void nfp_ctrl_close(struct nfp_net *nn)
rtnl_unlock();
}
+static void nfp_net_rx_dim_work(struct work_struct *work)
+{
+ struct nfp_net_r_vector *r_vec;
+ unsigned int factor, value;
+ struct dim_cq_moder moder;
+ struct nfp_net *nn;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
+ nn = r_vec->nfp_net;
+
+ /* Compute factor used to convert coalesce '_usecs' parameters to
+ * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
+ * count.
+ */
+ factor = nn->tlv_caps.me_freq_mhz / 16;
+ if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ return;
+
+ /* copy RX interrupt coalesce parameters */
+ value = (moder.pkts << 16) | (factor * moder.usec);
+ rtnl_lock();
+ nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
+ (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
+ rtnl_unlock();
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void nfp_net_tx_dim_work(struct work_struct *work)
+{
+ struct nfp_net_r_vector *r_vec;
+ unsigned int factor, value;
+ struct dim_cq_moder moder;
+ struct nfp_net *nn;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
+ nn = r_vec->nfp_net;
+
+ /* Compute factor used to convert coalesce '_usecs' parameters to
+ * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
+ * count.
+ */
+ factor = nn->tlv_caps.me_freq_mhz / 16;
+ if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ return;
+
+ /* copy TX interrupt coalesce parameters */
+ value = (moder.pkts << 16) | (factor * moder.usec);
+ rtnl_lock();
+ nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
+ (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
+ rtnl_unlock();
+
+ dim->state = DIM_START_MEASURE;
+}
+
/**
* nfp_net_open_stack() - Start the device from stack's perspective
* @nn: NFP Net device to reconfigure
*/
static void nfp_net_open_stack(struct nfp_net *nn)
{
+ struct nfp_net_r_vector *r_vec;
unsigned int r;
for (r = 0; r < nn->dp.num_r_vecs; r++) {
- napi_enable(&nn->r_vecs[r].napi);
- enable_irq(nn->r_vecs[r].irq_vector);
+ r_vec = &nn->r_vecs[r];
+
+ if (r_vec->rx_ring) {
+ INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
+ r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
+ if (r_vec->tx_ring) {
+ INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
+ r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
+ napi_enable(&r_vec->napi);
+ enable_irq(r_vec->irq_vector);
}
netif_tx_wake_all_queues(nn->dp.netdev);
@@ -3893,6 +4013,9 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
nn->rx_coalesce_max_frames = 64;
nn->tx_coalesce_usecs = 50;
nn->tx_coalesce_max_frames = 64;
+
+ nn->rx_coalesce_adapt_on = true;
+ nn->tx_coalesce_adapt_on = true;
}
static void nfp_net_netdev_init(struct nfp_net *nn)
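
A minimal sketch (not part of the patch) of the generic <linux/dim.h> pattern the hunks above follow: sample packet/byte counters from NAPI poll, let net_dim() pick a profile, and apply it from a work handler. The my_* names are hypothetical stand-ins for driver-specific rings and register programming.

#include <linux/dim.h>
#include <linux/workqueue.h>

struct my_ring {
	struct dim	dim;		/* DIM state for this ring */
	u16		event_ctr;	/* bumped in the IRQ handler */
	u64		pkts;		/* ring packet counter */
	u64		bytes;		/* ring byte counter */
};

/* Hypothetical hardware hook: program usecs/pkts moderation for a ring. */
static void my_hw_set_rx_moderation(struct my_ring *ring, u16 usecs, u16 pkts)
{
}

/* Called from the NAPI poll routine after completions are processed. */
static void my_ring_dim_sample(struct my_ring *ring)
{
	struct dim_sample sample = {};

	dim_update_sample(ring->event_ctr, ring->pkts, ring->bytes, &sample);
	net_dim(&ring->dim, sample);	/* may schedule ring->dim.work */
}

/* Work handler: apply the profile DIM picked, then restart measuring. */
static void my_ring_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct my_ring *ring = container_of(dim, struct my_ring, dim);
	struct dim_cq_moder moder;

	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	my_hw_set_rx_moderation(ring, moder.usec, moder.pkts);
	dim->state = DIM_START_MEASURE;
}

/* At ring bring-up, mirroring nfp_net_open_stack() above. */
static void my_ring_dim_init(struct my_ring *ring)
{
	INIT_WORK(&ring->dim.work, my_ring_dim_work);
	ring->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
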
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b482446536d..a213784ffa54 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1083,6 +1083,9 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
return -EINVAL;
+ ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
+ ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
+
ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
@@ -1359,19 +1362,18 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
return -EINVAL;
- if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
- return -EINVAL;
-
- if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
+ if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor,
+ ec->rx_max_coalesced_frames))
return -EINVAL;
- if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
- return -EINVAL;
-
- if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
+ if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor,
+ ec->tx_max_coalesced_frames))
return -EINVAL;
/* configuration is valid */
+ nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
+ nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce;
+
nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
@@ -1443,7 +1445,8 @@ static int nfp_net_set_channels(struct net_device *netdev,
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 64c6842bd452..d29fe562b3de 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1219,7 +1219,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_stop = lpc_eth_close,
.ndo_start_xmit = lpc_eth_hard_start_xmit,
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index e351f3d1608f..bc35d5703bd2 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2333,7 +2333,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
.ndo_tx_timeout = pch_gbe_tx_timeout,
.ndo_change_mtu = pch_gbe_change_mtu,
.ndo_set_features = pch_gbe_set_features,
- .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_eth_ioctl = pch_gbe_ioctl,
.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = pch_gbe_netpoll,
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index d058a63602a9..1a6336a56d3d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -546,7 +546,9 @@ static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_open(struct net_device *dev);
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static void hamachi_timer(struct timer_list *t);
static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void hamachi_init_ring(struct net_device *dev);
@@ -571,7 +573,8 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = hamachi_ioctl,
+ .ndo_siocdevprivate = hamachi_siocdevprivate,
};
@@ -1867,7 +1870,36 @@ static const struct ethtool_ops ethtool_ops_no_mii = {
.get_drvinfo = hamachi_get_drvinfo,
};
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+/* private ioctl: set rx,tx intr params */
+static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ u32 *d = (u32 *)&rq->ifr_ifru;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd != SIOCDEVPRIVATE + 3)
+ return -EOPNOTSUPP;
+
+ /* Should add this check here or an ordinary user can do nasty
+ * things. -KDU
+ *
+ * TODO: Shut down the Rx and Tx engines while doing this.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ writel(d[0], np->base + TxIntrCtrl);
+ writel(d[1], np->base + RxIntrCtrl);
+ printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
+ (u32)readl(np->base + TxIntrCtrl),
+ (u32)readl(np->base + RxIntrCtrl));
+
+ return 0;
+}
+
+static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct hamachi_private *np = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
@@ -1876,28 +1908,9 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
- u32 *d = (u32 *)&rq->ifr_ifru;
- /* Should add this check here or an ordinary user can do nasty
- * things. -KDU
- *
- * TODO: Shut down the Rx and Tx engines while doing this.
- */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- writel(d[0], np->base + TxIntrCtrl);
- writel(d[1], np->base + RxIntrCtrl);
- printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
- (u32) readl(np->base + TxIntrCtrl),
- (u32) readl(np->base + RxIntrCtrl));
- rc = 0;
- }
-
- else {
- spin_lock_irq(&np->lock);
- rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
- spin_unlock_irq(&np->lock);
- }
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
return rc;
}
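
A hedged sketch of the callback split applied throughout this series: MII/PHY and timestamping ioctls move to .ndo_eth_ioctl, while SIOCDEVPRIVATE-range commands get their own .ndo_siocdevprivate handler that receives the user pointer directly. The my_* handlers below are hypothetical placeholders, not driver code.

#include <linux/netdevice.h>
#include <linux/if.h>

static int my_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG and hwtstamp ioctls land here */
	return -EOPNOTSUPP;
}

static int my_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd)
{
	/* only SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 are routed here */
	return -EOPNOTSUPP;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_eth_ioctl		= my_eth_ioctl,		/* was .ndo_do_ioctl */
	.ndo_siocdevprivate	= my_siocdevprivate,	/* private cmds split out */
};
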
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d1dd9bc1bc7f..f5cd8f51be7c 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -362,7 +362,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout,
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index e4a5416adc80..7e296fa71b36 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -165,10 +165,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
goto out;
}
+ ionic->num_vfs++;
/* ignore failures from older FW, we just won't get stats */
(void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
(u8 *)&v->stats_pa);
- ionic->num_vfs++;
}
out:
@@ -373,9 +373,6 @@ static void ionic_remove(struct pci_dev *pdev)
{
struct ionic *ionic = pci_get_drvdata(pdev);
- if (!ionic)
- return;
-
del_timer_sync(&ionic->watchdog_timer);
if (ionic->lif) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1dfe962e22e0..9aac647290f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -106,6 +106,8 @@ int ionic_dev_setup(struct ionic *ionic)
idev->last_fw_hb = 0;
idev->fw_hb_ready = true;
idev->fw_status_ready = true;
+ idev->fw_generation = IONIC_FW_STS_F_GENERATION &
+ ioread8(&idev->dev_info_regs->fw_status);
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
@@ -121,7 +123,9 @@ int ionic_heartbeat_check(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long check_time, last_check_time;
- bool fw_status_ready, fw_hb_ready;
+ bool fw_status_ready = true;
+ bool fw_hb_ready;
+ u8 fw_generation;
u8 fw_status;
u32 fw_hb;
@@ -140,9 +144,29 @@ do_check_time:
/* firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
+ * If fw_status is not ready, don't bother with the generation.
*/
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+
+ if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
+ fw_status_ready = false;
+ } else {
+ fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
+ if (idev->fw_generation != fw_generation) {
+ dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n",
+ idev->fw_generation, fw_generation);
+
+ idev->fw_generation = fw_generation;
+
+ /* If the generation changed, the fw status is not
+ * ready so we need to trigger a fw-down cycle. After
+ * the down, the next watchdog will see the fw is up
+ * and the generation value stable, so will trigger
+ * the fw-up activity.
+ */
+ fw_status_ready = false;
+ }
+ }
/* is this a transition? */
if (fw_status_ready != idev->fw_status_ready) {
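
A standalone illustration (not part of the patch) of the fw_status decoding added above: bit 0 is the RUNNING flag and bits 4-7 carry a generation number that changes when the firmware restarts, so a generation change is reported as "not ready" once to force a fw-down/fw-up cycle.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FW_STS_F_RUNNING	0x01
#define FW_STS_F_GENERATION	0xF0

static bool fw_ready(uint8_t fw_status, uint8_t *last_generation)
{
	uint8_t generation;

	/* 0xff means the PCI read itself failed */
	if (fw_status == 0xff || !(fw_status & FW_STS_F_RUNNING))
		return false;

	generation = fw_status & FW_STS_F_GENERATION;
	if (generation != *last_generation) {
		/* report "not ready" once so the watchdog triggers the
		 * fw-down cycle, as the heartbeat check above does
		 */
		*last_generation = generation;
		return false;
	}

	return true;
}

int main(void)
{
	uint8_t last_gen = 0x10;

	printf("same generation: %d\n", fw_ready(0x11, &last_gen)); /* 1 */
	printf("new generation:  %d\n", fw_ready(0x21, &last_gen)); /* 0 */
	printf("after settling:  %d\n", fw_ready(0x21, &last_gen)); /* 1 */
	return 0;
}
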
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c25cf9b744c5..8311086fb1f4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -143,6 +143,7 @@ struct ionic_dev {
u32 last_fw_hb;
bool fw_hb_ready;
bool fw_status_ready;
+ u8 fw_generation;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -160,8 +161,6 @@ struct ionic_dev {
struct ionic_cq_info {
union {
void *cq_desc;
- struct ionic_txq_comp *txcq;
- struct ionic_rxq_comp *rxcq;
struct ionic_admin_comp *admincq;
struct ionic_notifyq_event *notifyq;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index b41301a5b0df..cd520e4c5522 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
- if (err)
+ if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- else
- devlink_port_type_eth_set(&ionic->dl_port,
- ionic->lif->netdev);
+ devlink_unregister(dl);
+ return err;
+ }
- return err;
+ devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ return 0;
}
void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- if (ionic->dl_port.registered)
- devlink_port_unregister(&ionic->dl_port);
+ devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6583be570e45..adc9fdb03e86 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -32,6 +32,9 @@ static void ionic_get_stats(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
u32 i;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
memset(buf, 0, stats->n_stats * sizeof(*buf));
for (i = 0; i < ionic_num_stats_grps; i++)
ionic_stats_groups[i].get_values(lif, &buf);
@@ -274,6 +277,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int err = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
mutex_lock(&ionic->dev_cmd_lock);
@@ -320,6 +326,9 @@ static int ionic_set_pauseparam(struct net_device *netdev,
u32 requested_pause;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -372,6 +381,9 @@ static int ionic_set_fecparam(struct net_device *netdev,
u8 fec_type;
int ret = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
if (lif->ionic->idev.port_info->config.an_enable) {
netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
return -EINVAL;
@@ -528,6 +540,9 @@ static int ionic_set_ringparam(struct net_device *netdev,
struct ionic_queue_params qparam;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
@@ -597,6 +612,9 @@ static int ionic_set_channels(struct net_device *netdev,
int max_cnt;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
@@ -947,6 +965,9 @@ static int ionic_nway_reset(struct net_device *netdev)
struct ionic *ionic = lif->ionic;
int err = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
/* flap the link to force auto-negotiation */
mutex_lock(&ionic->dev_cmd_lock);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 0478b48d9895..278610ed7227 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -2936,6 +2936,8 @@ struct ionic_hwstamp_regs {
* @asic_type: Asic type
* @asic_rev: Asic revision
* @fw_status: Firmware status
+ * bit 0 - 1 = fw running
+ * bit 4-7 - 4 bit generation number, changes on fw restart
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
* @fw_version: Firmware version
@@ -2949,7 +2951,8 @@ union ionic_dev_info_regs {
u8 version;
u8 asic_type;
u8 asic_rev;
-#define IONIC_FW_STS_F_RUNNING 0x1
+#define IONIC_FW_STS_F_RUNNING 0x01
+#define IONIC_FW_STS_F_GENERATION 0xF0
u8 fw_status;
u32 fw_heartbeat;
char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index e795fa63ca12..f52c47a71f4b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -1599,7 +1600,6 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_RXCSUM |
@@ -1607,6 +1607,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
NETIF_F_TSO6 |
NETIF_F_TSO_ECN;
+ if (lif->nxqs > 1)
+ features |= NETIF_F_RXHASH;
+
err = ionic_set_nic_features(lif, features);
if (err)
return err;
@@ -2257,7 +2260,7 @@ static int ionic_stop(struct net_device *netdev)
return 0;
}
-static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -2519,7 +2522,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
- .ndo_do_ioctl = ionic_do_ioctl,
+ .ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
@@ -2580,22 +2583,26 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_qcq **tx_qcqs = NULL;
struct ionic_qcq **rx_qcqs = NULL;
unsigned int flags, i;
- int err = -ENOMEM;
+ int err = 0;
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
- if (!tx_qcqs)
+ if (!tx_qcqs) {
+ err = -ENOMEM;
goto err_out;
+ }
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
qparam->rxq_features != lif->rxq_features) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
- if (!rx_qcqs)
+ if (!rx_qcqs) {
+ err = -ENOMEM;
goto err_out;
+ }
}
/* allocate new desc_info and rings, but leave the interrupt setup
@@ -2774,6 +2781,9 @@ err_out:
ionic_qcq_free(lif, lif->rxqcqs[i]);
}
+ if (err)
+ netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
+
return err;
}
@@ -2827,8 +2837,14 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->ionic = ionic;
lif->index = 0;
- lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
- lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+
+ if (is_kdump_kernel()) {
+ lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
+ lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
+ } else {
+ lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
+ lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+ }
/* Convert the default coalesce value to actual hw resolution */
lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
@@ -3514,6 +3530,7 @@ int ionic_lif_size(struct ionic *ionic)
unsigned int min_intrs;
int err;
+ /* retrieve basic values from FW */
lc = &ident->lif.eth.config;
dev_nintrs = le32_to_cpu(ident->dev.nintrs);
neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
@@ -3521,6 +3538,15 @@ int ionic_lif_size(struct ionic *ionic)
ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
+ /* limit values to play nice with kdump */
+ if (is_kdump_kernel()) {
+ dev_nintrs = 2;
+ neqs_per_lif = 0;
+ nnqs_per_lif = 0;
+ ntxqs_per_lif = 1;
+ nrxqs_per_lif = 1;
+ }
+
/* reserve last queue id for hardware timestamping */
if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 61cfe2120817..5f1e5b6e85c3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -450,6 +450,8 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
+ dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
+
if (err) {
dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
goto err_out;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 6e2403c71608..afc45da399d4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -119,8 +119,8 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n",
- config->rx_filter, rx_filt, rx_all);
+ dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n",
+ __func__, config->rx_filter, rx_filt, rx_all);
if (tx_mode) {
err = ionic_lif_create_hwstamp_txq(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08870190e4d2..37c39581b659 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
- buf_info->page = NULL;
- buf_info->page_offset = 0;
- buf_info->dma_addr = 0;
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
struct device *dev;
+ struct page *page;
dev = q->dev;
stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
return -EINVAL;
}
- buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!buf_info->page)) {
+ page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+ if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
return -ENOMEM;
}
- buf_info->page_offset = 0;
- buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+ buf_info->dma_addr = dma_map_page(dev, page, 0,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
- __free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ __free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
return -EIO;
}
+ buf_info->page = page;
+ buf_info->page_offset = 0;
+
return 0;
}
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
__free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- prefetch(buf_info->page);
+ prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
buf_info++;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b590c70539b5..d58e021614cd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,15 +26,6 @@
extern const struct qed_common_ops qed_common_ops_pass;
-#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 37
-#define QED_REVISION_VERSION 0
-#define QED_ENGINEERING_VERSION 20
-
-#define QED_VERSION \
- ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
- (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
-
#define STORM_FW_VERSION \
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
@@ -517,12 +508,6 @@ enum qed_hsi_def_type {
QED_NUM_HSI_DEFS
};
-#define DRV_MODULE_VERSION \
- __stringify(QED_MAJOR_VERSION) "." \
- __stringify(QED_MINOR_VERSION) "." \
- __stringify(QED_REVISION_VERSION) "." \
- __stringify(QED_ENGINEERING_VERSION)
-
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e81dd34a3cac..dc93ddea8906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -741,7 +741,6 @@ static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_dcbx_mib_meta_data data;
- int rc = 0;
memset(&data, 0, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
@@ -750,7 +749,7 @@ qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
data.size = sizeof(struct lldp_config_params_s);
qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
- return rc;
+ return 0;
}
static int
@@ -810,7 +809,6 @@ static int
qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_dcbx_mib_meta_data data;
- int rc = 0;
memset(&data, 0, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
@@ -819,7 +817,7 @@ qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
data.size = sizeof(struct dcbx_local_params);
qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
- return rc;
+ return 0;
}
static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5bd58c65e163..aa48b1b7eddc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -49,11 +49,10 @@
#define QED_NVM_CFG_MAX_ATTRS 50
static char version[] =
- "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
+ "QLogic FastLinQ 4xxxx Core Module qed\n";
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION \
__stringify(FW_MAJOR_VERSION) "." \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4387292c37e2..6e5a6cc97d0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -944,7 +944,6 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
memset(&in_params, 0, sizeof(in_params));
in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
- in_params.drv_ver_0 = QED_VERSION;
in_params.drv_ver_1 = qed_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 2e62a2c4eb63..8693117a6180 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -30,15 +30,6 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 37
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 20
-#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
- __stringify(QEDE_MINOR_VERSION) "." \
- __stringify(QEDE_REVISION_VERSION) "." \
- __stringify(QEDE_ENGINEERING_VERSION)
-
#define DRV_MODULE_SYM qede
struct qede_stats_common {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1560ad3d9290..9c6aa6859646 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -625,13 +625,13 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ if ((strlen(storm) + strlen("[storm]")) <
sizeof(info->version))
snprintf(info->version, sizeof(info->version),
- "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ "[storm %s]", storm);
else
snprintf(info->version, sizeof(info->version),
- "%s %s", DRV_MODULE_VERSION, storm);
+ "%s", storm);
if (edev->dev_info.common.mbi_version) {
snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 01ac1e93d27a..033bf2c7f56c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -39,12 +39,8 @@
#include "qede.h"
#include "qede_ptp.h"
-static char version[] =
- "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
-
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug;
module_param(debug, uint, 0);
@@ -258,7 +254,7 @@ int __init qede_init(void)
{
int ret;
- pr_info("qede_init: %s\n", version);
+ pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
qede_forced_speed_maps_init();
@@ -644,7 +640,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_do_ioctl = qede_ioctl,
+ .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -1150,10 +1146,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- sp_params.drv_major = QEDE_MAJOR_VERSION;
- sp_params.drv_minor = QEDE_MINOR_VERSION;
- sp_params.drv_rev = QEDE_REVISION_VERSION;
- sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index ad655f0a4965..9015a38eaced 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -377,7 +377,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = emac_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_get_stats64 = emac_get_stats64,
.ndo_set_features = emac_set_features,
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 47e9998b62f0..4b2eca5e08e2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -954,7 +954,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9677e257e9a1..edc61906694f 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1869,7 +1869,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_set_mac_address = cp_set_mac_address,
.ndo_set_rx_mode = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats,
- .ndo_do_ioctl = cp_ioctl,
+ .ndo_eth_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index f0608f050050..2e6923cc653e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -932,7 +932,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
.ndo_set_rx_mode = rtl8139_set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = rtl8139_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8139_poll_controller,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c7af5bc3b8af..fa2dab6980bb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4979,7 +4979,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_fix_features = rtl8169_fix_features,
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 805397088850..f4dfe9f71d06 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1872,7 +1872,7 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
- .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_eth_ioctl = ravb_do_ioctl,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 840478692a37..6c8ba916d1a6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3141,7 +3141,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3157,7 +3157,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 315a6e5c0f59..e75814a4654f 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -119,7 +119,8 @@ struct rocker_world_ops {
int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
u16 vid, const unsigned char *addr);
int (*port_master_linked)(struct rocker_port *rocker_port,
- struct net_device *master);
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
int (*port_master_unlinked)(struct rocker_port *rocker_port,
struct net_device *master);
int (*port_neigh_update)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index a46633606cae..53d407a5dbf7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1670,13 +1670,14 @@ rocker_world_port_fdb_del(struct rocker_port *rocker_port,
}
static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
- struct net_device *master)
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_master_linked)
return -EOPNOTSUPP;
- return wops->port_master_linked(rocker_port, master);
+ return wops->port_master_linked(rocker_port, master, extack);
}
static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
@@ -3107,6 +3108,7 @@ struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct rocker_port *rocker_port;
@@ -3123,7 +3125,8 @@ static int rocker_netdevice_event(struct notifier_block *unused,
rocker_port = netdev_priv(dev);
if (info->linking) {
err = rocker_world_port_master_linked(rocker_port,
- info->upper_dev);
+ info->upper_dev,
+ extack);
if (err)
netdev_warn(dev, "failed to reflect master linked (err %d)\n",
err);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 967a634ee9ac..b82e169b7836 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2571,8 +2571,10 @@ static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
}
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
- struct net_device *bridge)
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
{
+ struct net_device *dev = ofdpa_port->dev;
int err;
/* Port is joining bridge, so the internal VLAN for the
@@ -2592,13 +2594,21 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
ofdpa_port->bridge_dev = bridge;
- return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ if (err)
+ return err;
+
+ return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
+ false, extack);
}
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
+ struct net_device *dev = ofdpa_port->dev;
int err;
+ switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);
+
err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err)
return err;
@@ -2637,13 +2647,14 @@ static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
}
static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
- struct net_device *master)
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
int err = 0;
if (netif_is_bridge_master(master))
- err = ofdpa_port_bridge_join(ofdpa_port, master);
+ err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
else if (netif_is_ovs_master(master))
err = ofdpa_port_ovs_changed(ofdpa_port, master);
return err;
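
A minimal sketch of the bridge-port offload handshake these rocker/ofdpa (and later am65-cpsw) hunks add on bridge join and leave. Only the switchdev_bridge_port_offload()/unoffload() calls are the real <net/switchdev.h> API; my_port_* and the unused extra context/notifier arguments (passed as NULL, false here) are placeholders.

#include <net/switchdev.h>
#include <linux/netdevice.h>

static int my_port_bridge_join(struct net_device *port_dev,
			       struct net_device *bridge,
			       struct netlink_ext_ack *extack)
{
	/* tell the bridge this port is offloaded so it stops software-
	 * forwarding between offloaded ports of the same switch
	 */
	return switchdev_bridge_port_offload(port_dev, port_dev, NULL,
					     NULL, NULL, false, extack);
}

static void my_port_bridge_leave(struct net_device *port_dev)
{
	switchdev_bridge_port_unoffload(port_dev, NULL, NULL, NULL);
}
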
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 090bcd2fb758..6781aa636d58 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1964,7 +1964,7 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_features = sxgbe_set_features,
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
- .ndo_do_ioctl = sxgbe_ioctl,
+ .ndo_eth_ioctl = sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 37fcf2eb0741..a295e2621cf3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -591,7 +591,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
+ .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 9ec752a43c75..c177ea0f301e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2219,7 +2219,7 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_tx_timeout = ef4_watchdog,
.ndo_start_xmit = ef4_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ef4_ioctl,
+ .ndo_eth_ioctl = ef4_ioctl,
.ndo_change_mtu = ef4_change_mtu,
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 2b29fd4cbdf4..062f7844c496 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -820,7 +820,7 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_tx_timeout = ioc3_timeout,
.ndo_get_stats = ioc3_get_stats,
.ndo_set_rx_mode = ioc3_set_multicast_list,
- .ndo_do_ioctl = ioc3_ioctl,
+ .ndo_eth_ioctl = ioc3_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ioc3_set_mac_address,
};
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 0c396ecd3389..efce834d8ee6 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -812,7 +812,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
.ndo_start_xmit = meth_tx,
- .ndo_do_ioctl = meth_ioctl,
+ .ndo_eth_ioctl = meth_ioctl,
.ndo_tx_timeout = meth_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 676b193833c0..3d1a18a01ce5 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1841,7 +1841,7 @@ static int sis190_mac_addr(struct net_device *dev, void *p)
static const struct net_device_ops sis190_netdev_ops = {
.ndo_open = sis190_open,
.ndo_stop = sis190_close,
- .ndo_do_ioctl = sis190_ioctl,
+ .ndo_eth_ioctl = sis190_ioctl,
.ndo_start_xmit = sis190_start_xmit,
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_rx_mode = sis190_set_rx_mode,
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cff87de9178a..60a0c0e9ded2 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -404,7 +404,7 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = mii_ioctl,
+ .ndo_eth_ioctl = mii_ioctl,
.ndo_tx_timeout = sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sis900_poll,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 51cd7dca91cd..44daf79a8f97 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -312,7 +312,7 @@ static const struct net_device_ops epic_netdev_ops = {
.ndo_tx_timeout = epic_tx_timeout,
.ndo_get_stats = epic_get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index f2a50eb3c1e0..42fc37c7887a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -294,7 +294,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_tx_timeout = smc_tx_timeout,
.ndo_set_config = s9k_config,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = smc_ioctl,
+ .ndo_eth_ioctl = smc_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 556a9790cdcf..199a97339280 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2148,7 +2148,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index c1dab009415d..fdbd2a43e267 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1482,7 +1482,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
.ndo_set_rx_mode = smsc9420_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 20d148c019d8..d15f7b3a3f10 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1831,7 +1831,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_features = netsec_netdev_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_xdp_xmit = netsec_xdp_xmit,
.ndo_bpf = netsec_xdp,
};
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 5eb6bb4f7b6c..ae31ed93aaf0 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1543,7 +1543,7 @@ static const struct net_device_ops ave_netdev_ops = {
.ndo_open = ave_open,
.ndo_stop = ave_stop,
.ndo_start_xmit = ave_start_xmit,
- .ndo_do_ioctl = ave_ioctl,
+ .ndo_eth_ioctl = ave_ioctl,
.ndo_set_rx_mode = ave_set_rx_mode,
.ndo_get_stats64 = ave_get_stats64,
.ndo_set_mac_address = ave_set_mac_address,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7b8404a21544..a2aa75cb184e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6451,7 +6451,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
- .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_eth_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 981685c88308..287ae4c538aa 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4876,7 +4876,7 @@ static const struct net_device_ops cas_netdev_ops = {
.ndo_start_xmit = cas_start_xmit,
.ndo_get_stats = cas_get_stats,
.ndo_set_rx_mode = cas_set_multicast,
- .ndo_do_ioctl = cas_ioctl,
+ .ndo_eth_ioctl = cas_ioctl,
.ndo_tx_timeout = cas_tx_timeout,
.ndo_change_mtu = cas_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 860644d182ab..a95e99da1ae5 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9668,7 +9668,7 @@ static const struct net_device_ops niu_netdev_ops = {
.ndo_set_rx_mode = niu_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = niu_set_mac_addr,
- .ndo_do_ioctl = niu_ioctl,
+ .ndo_eth_ioctl = niu_ioctl,
.ndo_tx_timeout = niu_tx_timeout,
.ndo_change_mtu = niu_change_mtu,
};
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index cfb9e21b18b7..d72018a60c0f 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2831,7 +2831,7 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_start_xmit = gem_start_xmit,
.ndo_get_stats = gem_get_stats,
.ndo_set_rx_mode = gem_set_multicast,
- .ndo_do_ioctl = gem_ioctl,
+ .ndo_eth_ioctl = gem_ioctl,
.ndo_tx_timeout = gem_tx_timeout,
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 26d178f8616b..1db7104fef3a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -933,7 +933,7 @@ static const struct net_device_ops xlgmac_netdev_ops = {
.ndo_change_mtu = xlgmac_change_mtu,
.ndo_set_mac_address = xlgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xlgmac_ioctl,
+ .ndo_eth_ioctl = xlgmac_ioctl,
.ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index d054c6e83b1c..8f6abaec41d1 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -637,7 +637,8 @@ static int bdx_range_check(struct bdx_priv *priv, u32 offset)
-EINVAL : 0;
}
-static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
+static int bdx_siocdevprivate(struct net_device *ndev, struct ifreq *ifr,
+ void __user *udata, int cmd)
{
struct bdx_priv *priv = netdev_priv(ndev);
u32 data[3];
@@ -647,7 +648,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
if (cmd != SIOCDEVPRIVATE) {
- error = copy_from_user(data, ifr->ifr_data, sizeof(data));
+ error = copy_from_user(data, udata, sizeof(data));
if (error) {
pr_err("can't copy from user\n");
RET(-EFAULT);
@@ -669,7 +670,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
data[2] = READ_REG(priv, data[1]);
DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
data[2]);
- error = copy_to_user(ifr->ifr_data, data, sizeof(data));
+ error = copy_to_user(udata, data, sizeof(data));
if (error)
RET(-EFAULT);
break;
@@ -688,15 +689,6 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
return 0;
}
-static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- ENTER;
- if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
- RET(bdx_ioctl_priv(ndev, ifr, cmd));
- else
- RET(-EOPNOTSUPP);
-}
-
/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
* @ndev: network device
@@ -1860,7 +1852,7 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_stop = bdx_close,
.ndo_start_xmit = bdx_tx_transmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = bdx_ioctl,
+ .ndo_siocdevprivate = bdx_siocdevprivate,
.ndo_set_rx_mode = bdx_setmulti,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index affcf92cd3aa..7ac8e5ecbe97 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -64,6 +64,7 @@ config TI_CPSW
config TI_CPSW_SWITCHDEV
tristate "TI CPSW Switch Support with switchdev"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on TI_CPTS || !TI_CPTS
select PAGE_POOL
@@ -109,6 +110,7 @@ config TI_K3_AM65_CPSW_NUSS
config TI_K3_AM65_CPSW_SWITCHDEV
bool "TI K3 AM654x/J721E CPSW Switch mode support"
depends on TI_K3_AM65_CPSW_NUSS
+ depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
help
This enables switchdev support for TI K3 CPSWxG Ethernet
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 718539cdd2f2..4f67d1a98c0d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -1479,7 +1480,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
- .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
+ .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
@@ -2077,10 +2078,13 @@ bool am65_cpsw_port_dev_check(const struct net_device *ndev)
return false;
}
-static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ int err;
if (!common->br_members) {
common->hw_bridge_dev = br_ndev;
@@ -2092,6 +2096,11 @@ static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_dev
return -EOPNOTSUPP;
}
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
common->br_members |= BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
@@ -2104,6 +2113,8 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
common->br_members &= ~BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
@@ -2116,6 +2127,7 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
static int am65_cpsw_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
int ret = NOTIFY_DONE;
@@ -2129,7 +2141,9 @@ static int am65_cpsw_netdevice_event(struct notifier_block *unused,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
+ ret = am65_cpsw_netdevice_port_link(ndev,
+ info->upper_dev,
+ extack);
else
am65_cpsw_netdevice_port_unlink(ndev);
}
@@ -2384,21 +2398,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = {
am65_cpsw_dl_switch_mode_set, NULL),
};
-static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
-{
- struct devlink_port *dl_port;
- struct am65_cpsw_port *port;
- int i;
-
- for (i = 1; i <= common->port_num; i++) {
- port = am65_common_get_port(common, i);
- dl_port = &port->devlink_port;
-
- if (dl_port->registered)
- devlink_port_unregister(dl_port);
- }
-}
-
static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
struct devlink_port_attrs attrs = {};
@@ -2460,7 +2459,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
return ret;
dl_port_unreg:
- am65_cpsw_unregister_devlink_ports(common);
+ for (i = i - 1; i >= 1; i--) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
dl_unreg:
devlink_unregister(common->devlink);
dl_free:
@@ -2471,6 +2475,17 @@ dl_free:
static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
+
if (!AM65_CPSW_IS_CPSW2G(common) &&
IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
devlink_params_unpublish(common->devlink);
@@ -2478,7 +2493,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
ARRAY_SIZE(am65_cpsw_devlink_params));
}
- am65_cpsw_unregister_devlink_ports(common);
devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index c20715107075..02d4e51f7306 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1044,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
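->ndo_eth_ioctl is invoked for the MII and hardware-timestamping ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG, SIOCSHWTSTAMP, SIOCGHWTSTAMP), so phylib-backed drivers such as cpmac can keep pointing it at phy_do_ioctl_running. Open-coded, that helper amounts to roughly the sketch below (assuming the driver attaches dev->phydev before the interface is brought up; foo_eth_ioctl is a placeholder name):

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;
	if (!dev->phydev)
		return -ENODEV;

	/* let phylib service the MII request */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}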
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cbbd0f665796..abf9a2a6f7eb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1159,7 +1159,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 57d279fdcc9f..b4f55ff4e84f 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
+#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
@@ -1127,7 +1128,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1499,10 +1500,12 @@ static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
}
static int cpsw_netdevice_port_link(struct net_device *ndev,
- struct net_device *br_ndev)
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ int err;
if (!cpsw->br_members) {
cpsw->hw_bridge_dev = br_ndev;
@@ -1514,6 +1517,11 @@ static int cpsw_netdevice_port_link(struct net_device *ndev,
return -EOPNOTSUPP;
}
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
cpsw->br_members |= BIT(priv->emac_port);
cpsw_port_offload_fwd_mark_update(cpsw);
@@ -1526,6 +1534,8 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
cpsw->br_members &= ~BIT(priv->emac_port);
cpsw_port_offload_fwd_mark_update(cpsw);
@@ -1538,6 +1548,7 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev)
static int cpsw_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
int ret = NOTIFY_DONE;
@@ -1552,7 +1563,8 @@ static int cpsw_netdevice_event(struct notifier_block *unused,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
ret = cpsw_netdevice_port_link(ndev,
- info->upper_dev);
+ info->upper_dev,
+ extack);
else
cpsw_netdevice_port_unlink(ndev);
}
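Both CPSW flavours now tell the bridge layer explicitly when a port is offloaded: a CHANGEUPPER event that links the port under a bridge master calls switchdev_bridge_port_offload() (passing the extack through so failures reach the user), and the unlink path calls switchdev_bridge_port_unoffload(). A condensed sketch of that notifier pattern, with foo_* standing in for the driver-specific checks:

static int foo_netdevice_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	int err;

	if (event != NETDEV_CHANGEUPPER || !foo_port_dev_check(ndev))
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(info->upper_dev))
		return NOTIFY_DONE;

	if (info->linking) {
		/* no extra switchdev notifier blocks, no tx forwarding offload */
		err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL,
						    NULL, false, extack);
		if (err)
			return notifier_from_errno(err);
	} else {
		switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
	}

	return NOTIFY_DONE;
}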
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index c674e34b6839..637796670746 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1670,7 +1670,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_dev_xmit,
.ndo_set_rx_mode = emac_dev_mcast_set,
.ndo_set_mac_address = emac_dev_setmac_addr,
- .ndo_do_ioctl = emac_devioctl,
+ .ndo_eth_ioctl = emac_devioctl,
.ndo_tx_timeout = emac_dev_tx_timeout,
.ndo_get_stats = emac_dev_getnetstats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 97942b0e3897..eda2961c0fe2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1944,7 +1944,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_stop = netcp_ndo_stop,
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
- .ndo_do_ioctl = netcp_ndo_ioctl,
+ .ndo_eth_ioctl = netcp_ndo_ioctl,
.ndo_get_stats64 = netcp_get_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index e0cb713193ea..77c448ad67ce 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -749,7 +749,7 @@ static const struct net_device_ops tlan_netdev_ops = {
.ndo_tx_timeout = tlan_tx_timeout,
.ndo_get_stats = tlan_get_stats,
.ndo_set_rx_mode = tlan_set_multicast_list,
- .ndo_do_ioctl = tlan_ioctl,
+ .ndo_eth_ioctl = tlan_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 226a76633e65..087f0af56c50 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2214,7 +2214,7 @@ static const struct net_device_ops spider_net_ops = {
.ndo_start_xmit = spider_net_xmit,
.ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
- .ndo_do_ioctl = spider_net_do_ioctl,
+ .ndo_eth_ioctl = spider_net_do_ioctl,
.ndo_tx_timeout = spider_net_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
/* HW VLAN */
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index fedb2bf69261..52245ac60fc7 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -750,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_get_stats = tc35815_get_stats,
.ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c62f474b6d08..cf0917b29e30 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1538,7 +1538,7 @@ static const struct net_device_ops tsi108_netdev_ops = {
.ndo_start_xmit = tsi108_send_packet,
.ndo_set_rx_mode = tsi108_set_rx_mode,
.ndo_get_stats = tsi108_get_stats,
- .ndo_do_ioctl = tsi108_do_ioctl,
+ .ndo_eth_ioctl = tsi108_do_ioctl,
.ndo_set_mac_address = tsi108_set_mac,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 73ca597ebd1b..961b623b7880 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -884,7 +884,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout,
.ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 88426b5e410b..278f49518d3f 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2637,7 +2637,7 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = velocity_set_multi,
.ndo_change_mtu = velocity_change_mtu,
- .ndo_do_ioctl = velocity_ioctl,
+ .ndo_eth_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60a4f79b8fa1..db1994fb51c5 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1237,7 +1237,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 13cd799541aa..348c0ba5edcf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1227,7 +1227,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = axienet_ioctl,
+ .ndo_eth_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b06377fe7293..b780aad3550a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1263,7 +1263,7 @@ static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_eth_ioctl = xemaclite_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 4f6db6f5c272..ae611e46da6a 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -464,7 +464,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = do_start_xmit,
.ndo_tx_timeout = xirc_tx_timeout,
.ndo_set_config = do_config,
- .ndo_do_ioctl = do_ioctl,
+ .ndo_eth_ioctl = do_ioctl,
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7ae754eadf22..ff50305d6e13 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1357,7 +1357,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_stop = eth_close,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_do_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = eth_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 69c29a2ef95d..f62e98fada1a 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -70,6 +70,7 @@ static const char * const boot_msg =
/* Include files */
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -103,7 +104,8 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
static void skfp_ctl_set_multicast_list(struct net_device *dev);
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
-static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev);
static void send_queued_packets(struct s_smc *smc);
@@ -164,7 +166,7 @@ static const struct net_device_ops skfp_netdev_ops = {
.ndo_get_stats = skfp_ctl_get_stats,
.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
- .ndo_do_ioctl = skfp_ioctl,
+ .ndo_siocdevprivate = skfp_siocdevprivate,
};
/*
@@ -932,9 +934,9 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
/*
- * ==============
- * = skfp_ioctl =
- * ==============
+ * =======================
+ * = skfp_siocdevprivate =
+ * =======================
*
* Overview:
*
@@ -954,16 +956,19 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
*/
-static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
- if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
+ if (copy_from_user(&ioc, data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch (ioc.cmd) {
case SKFP_GET_STATS: /* Get the driver statistics */
ioc.len = sizeof(lp->MacStat);
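The skfddi conversion also adds an in_compat_syscall() bail-out: struct s_skfp_ioctl embeds a user-space data pointer, so its layout differs between 32-bit and 64-bit callers and the handler now refuses compat callers instead of misreading the buffer. Reduced to its essentials (struct foo_req and foo_siocdevprivate are illustrative stand-ins, not the driver's own definitions):

struct foo_req {
	unsigned short cmd;
	unsigned short len;
	void __user *buf;	/* 32 bits wide in compat tasks, 64 natively */
};

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd)
{
	struct foo_req req;

	if (in_compat_syscall())	/* would need a separate compat layout */
		return -EOPNOTSUPP;

	if (copy_from_user(&req, data, sizeof(req)))
		return -EFAULT;

	/* ... dispatch on req.cmd, using req.buf / req.len ... */
	return 0;
}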
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 4435a1195194..775dcf4ebde5 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1005,7 +1005,8 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct baycom_state *bc = netdev_priv(dev);
struct hdlcdrv_ioctl hi;
@@ -1013,7 +1014,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
- if (copy_from_user(&hi, ifr->ifr_data, sizeof(hi)))
+ if (copy_from_user(&hi, data, sizeof(hi)))
return -EFAULT;
switch (hi.cmd) {
default:
@@ -1104,7 +1105,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return HDLCDRV_PARMASK_IOBASE;
}
- if (copy_to_user(ifr->ifr_data, &hi, sizeof(hi)))
+ if (copy_to_user(data, &hi, sizeof(hi)))
return -EFAULT;
return 0;
}
@@ -1114,7 +1115,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static const struct net_device_ops baycom_netdev_ops = {
.ndo_open = epp_open,
.ndo_stop = epp_close,
- .ndo_do_ioctl = baycom_ioctl,
+ .ndo_siocdevprivate = baycom_siocdevprivate,
.ndo_start_xmit = baycom_send_packet,
.ndo_set_mac_address = baycom_set_mac_address,
};
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 6a3dc7b3f28a..fd7da5bb1fa5 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -380,7 +380,7 @@ static int par96_close(struct net_device *dev)
* ===================== hdlcdrv driver interface =========================
*/
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -408,7 +408,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -428,7 +428,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_GETMODE:
strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -440,7 +440,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "par96,picpar");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -449,7 +449,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -464,7 +464,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 04bb409707fc..646f605e358f 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -462,7 +462,7 @@ static int ser12_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -497,7 +497,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -519,7 +519,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
sprintf(hi->data.modename, "ser%u", bc->baud / 100);
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : "+");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12,ser3,ser24");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -540,7 +540,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -555,7 +555,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index a1acb3a47bdb..5d1ab4840753 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -521,7 +521,7 @@ static int ser12_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -551,7 +551,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -573,7 +573,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
strcpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -585,7 +585,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -594,7 +594,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -609,7 +609,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0e623c2e8b2d..d967b0748773 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -314,9 +314,10 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
* source ethernet address (broadcast
* or multicast: accept all)
*/
-static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int bpq_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- struct bpq_ethaddr __user *ethaddr = ifr->ifr_data;
+ struct bpq_ethaddr __user *ethaddr = data;
struct bpqdev *bpq = netdev_priv(dev);
struct bpq_req req;
@@ -325,7 +326,7 @@ static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSBPQETHOPT:
- if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
+ if (copy_from_user(&req, data, sizeof(struct bpq_req)))
return -EFAULT;
switch (req.cmd) {
case SIOCGBPQETHPARAM:
@@ -448,7 +449,7 @@ static const struct net_device_ops bpq_netdev_ops = {
.ndo_stop = bpq_close,
.ndo_start_xmit = bpq_xmit,
.ndo_set_mac_address = bpq_set_mac_address,
- .ndo_do_ioctl = bpq_ioctl,
+ .ndo_siocdevprivate = bpq_siocdevprivate,
};
static void bpq_setup(struct net_device *dev)
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index c25c8c99c5c7..b50b7fafd8d6 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -225,7 +225,8 @@ static int read_scc_data(struct scc_priv *priv);
static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
-static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);
@@ -432,7 +433,7 @@ static const struct net_device_ops scc_netdev_ops = {
.ndo_open = scc_open,
.ndo_stop = scc_close,
.ndo_start_xmit = scc_send_packet,
- .ndo_do_ioctl = scc_ioctl,
+ .ndo_siocdevprivate = scc_siocdevprivate,
.ndo_set_mac_address = scc_set_mac_address,
};
@@ -881,15 +882,13 @@ static int scc_close(struct net_device *dev)
}
-static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct scc_priv *priv = dev->ml_priv;
switch (cmd) {
case SIOCGSCCPARAM:
- if (copy_to_user
- (ifr->ifr_data, &priv->param,
- sizeof(struct scc_param)))
+ if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
return -EFAULT;
return 0;
case SIOCSSCCPARAM:
@@ -897,13 +896,12 @@ static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EPERM;
if (netif_running(dev))
return -EAGAIN;
- if (copy_from_user
- (&priv->param, ifr->ifr_data,
- sizeof(struct scc_param)))
+ if (copy_from_user(&priv->param, data,
+ sizeof(struct scc_param)))
return -EFAULT;
return 0;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index cbaf1cdde7cb..5805cfc83854 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -483,23 +483,25 @@ static int hdlcdrv_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct hdlcdrv_state *s = netdev_priv(dev);
struct hdlcdrv_ioctl bi;
- if (cmd != SIOCDEVPRIVATE) {
- if (s->ops && s->ops->ioctl)
- return s->ops->ioctl(dev, ifr, &bi, cmd);
+ if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
- }
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+
+ if (in_compat_syscall()) /* to be implemented */
+ return -ENOIOCTLCMD;
+
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
if (s->ops && s->ops->ioctl)
- return s->ops->ioctl(dev, ifr, &bi, cmd);
+ return s->ops->ioctl(dev, data, &bi, cmd);
return -ENOIOCTLCMD;
case HDLCDRVCTL_GETCHANNELPAR:
@@ -605,7 +607,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
@@ -617,7 +619,7 @@ static const struct net_device_ops hdlcdrv_netdev = {
.ndo_open = hdlcdrv_open,
.ndo_stop = hdlcdrv_close,
.ndo_start_xmit = hdlcdrv_send_packet,
- .ndo_do_ioctl = hdlcdrv_ioctl,
+ .ndo_siocdevprivate = hdlcdrv_siocdevprivate,
.ndo_set_mac_address = hdlcdrv_set_mac_address,
};
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 3f1edd0526a4..e0bb131a33d7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -210,7 +210,8 @@ static int scc_net_close(struct net_device *dev);
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
static netdev_tx_t scc_net_tx(struct sk_buff *skb,
struct net_device *dev);
-static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int scc_net_set_mac_address(struct net_device *dev, void *addr);
static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
@@ -1550,7 +1551,7 @@ static const struct net_device_ops scc_netdev_ops = {
.ndo_start_xmit = scc_net_tx,
.ndo_set_mac_address = scc_net_set_mac_address,
.ndo_get_stats = scc_net_get_stats,
- .ndo_do_ioctl = scc_net_ioctl,
+ .ndo_siocdevprivate = scc_net_siocdevprivate,
};
/* ----> Initialize device <----- */
@@ -1703,7 +1704,8 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
* SIOCSCCCAL - send calib. pattern arg: (struct scc_calibrate *) arg
*/
-static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int scc_net_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifr, void __user *arg, int cmd)
{
struct scc_kiss_cmd kiss_cmd;
struct scc_mem_config memcfg;
@@ -1712,8 +1714,6 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
int chan;
unsigned char device_name[IFNAMSIZ];
- void __user *arg = ifr->ifr_data;
-
if (!Driver_Initialized)
{
@@ -1722,6 +1722,9 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
int found = 1;
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
if (!arg) return -EFAULT;
if (Nchips >= SCC_MAXCHIPS)
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index d4911041596c..6ddacbdb224b 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -920,15 +920,15 @@ static int yam_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
- if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
- return -EFAULT;
+ if (copy_from_user(&ioctl_cmd, data, sizeof(int)))
+ return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
@@ -947,8 +947,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- ym = memdup_user(ifr->ifr_data,
- sizeof(struct yamdrv_ioctl_mcs));
+ ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
if (ym->cmd != SIOCYAMSMCS)
@@ -965,8 +964,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
- return -EFAULT;
+ if (copy_from_user(&yi, data, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
if (yi.cmd != SIOCYAMSCFG)
return -EINVAL;
@@ -1045,8 +1044,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
- if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
- return -EFAULT;
+ if (copy_to_user(data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
break;
default:
@@ -1074,7 +1073,7 @@ static const struct net_device_ops yam_netdev_ops = {
.ndo_open = yam_open,
.ndo_stop = yam_close,
.ndo_start_xmit = yam_send_packet,
- .ndo_do_ioctl = yam_ioctl,
+ .ndo_siocdevprivate = yam_siocdevprivate,
.ndo_set_mac_address = yam_set_mac_address,
};
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 22010384c4a3..7661dbb31162 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -63,7 +63,7 @@ static const char version[] =
static const struct net_device_ops rr_netdev_ops = {
.ndo_open = rr_open,
.ndo_stop = rr_close,
- .ndo_do_ioctl = rr_ioctl,
+ .ndo_siocdevprivate = rr_siocdevprivate,
.ndo_start_xmit = rr_start_xmit,
.ndo_set_mac_address = hippi_mac_addr,
};
@@ -1568,7 +1568,8 @@ out:
}
-static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct rr_private *rrpriv;
unsigned char *image, *oldimage;
@@ -1603,7 +1604,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
error = -EFAULT;
goto gf_out;
}
- error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
+ error = copy_to_user(data, image, EEPROM_BYTES);
if (error)
error = -EFAULT;
gf_out:
@@ -1615,7 +1616,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EPERM;
}
- image = memdup_user(rq->ifr_data, EEPROM_BYTES);
+ image = memdup_user(data, EEPROM_BYTES);
if (IS_ERR(image))
return PTR_ERR(image);
@@ -1658,7 +1659,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return error;
case SIOCRRID:
- return put_user(0x52523032, (int __user *)rq->ifr_data);
+ return put_user(0x52523032, (int __user *)data);
default:
return error;
}
diff --git a/drivers/net/hippi/rrunner.h b/drivers/net/hippi/rrunner.h
index 87533784604f..55377614e752 100644
--- a/drivers/net/hippi/rrunner.h
+++ b/drivers/net/hippi/rrunner.h
@@ -835,7 +835,8 @@ static int rr_open(struct net_device *dev);
static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int rr_close(struct net_device *dev);
-static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
unsigned long offset,
unsigned char *buf,
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 506f8d5cd4ee..75435d40b920 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,6 +1,3 @@
-# Un-comment the next line if you want to validate configuration data
-#ccflags-y += -DIPA_VALIDATE
-
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 427c68b2ad8f..3de67ba066a6 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1964,7 +1964,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
-#ifdef IPA_VALIDATION
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2010,7 +2009,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
channel_id, data->channel.event_count);
return false;
}
-#endif /* IPA_VALIDATION */
return true;
}
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 8c795a6a8598..1544564bc283 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
{
void *virt;
-#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
-#endif /* IPA_VALIDATE */
/* By allocating a few extra entries in our pool (one less
* than the maximum number that will be requested in a
@@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
dma_addr_t addr;
void *virt;
-#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
-#endif /* IPA_VALIDATE */
/* Don't let allocations cross a power-of-two boundary */
size = __roundup_pow_of_two(size);
@@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
u32 offset;
- /* assert(count > 0); */
- /* assert(count <= pool->max_alloc); */
+ WARN_ON(!count);
+ WARN_ON(count > pool->max_alloc);
/* Allocate from beginning if wrap would occur */
if (count > pool->count - pool->free)
@@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
void *end = pool->base + pool->count * pool->size;
- /* assert(element >= pool->base); */
- /* assert(element < end); */
- /* assert(pool->max_alloc == 1); */
+ WARN_ON(element < pool->base);
+ WARN_ON(element >= end);
+ WARN_ON(pool->max_alloc != 1);
+
element += pool->size;
return element < end ? element : pool->base;
@@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
- /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
+ if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ return NULL;
trans_info = &channel->trans_info;
@@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
u32 which = trans->used++;
struct scatterlist *sg;
- /* assert(which < trans->tre_count); */
+ WARN_ON(which >= trans->tre_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- /* assert(trans->tre_count == 1); */
- /* assert(!trans->used); */
+ if (WARN_ON(trans->tre_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used))
+ return -EINVAL;
sg_set_page(sg, page, size, offset);
ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
@@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
u32 used;
int ret;
- /* assert(trans->tre_count == 1); */
- /* assert(!trans->used); */
+ if (WARN_ON(trans->tre_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used))
+ return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
u32 avail;
u32 i;
- /* assert(trans->used > 0); */
+ WARN_ON(!trans->used);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
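The IPA changes also turn the commented-out assert() placeholders into live checks. WARN_ON() evaluates to the truth value of its condition, so the same macro serves two purposes in gsi_trans.c: as a bare statement it logs a backtrace and lets execution continue, and wrapped in an if it gates an error return when the caller can recover. In sketch form (names taken loosely from the surrounding code):

/* tolerable inconsistency: warn with a backtrace, carry on */
WARN_ON(which >= trans->tre_count);

/* caller error we must not act on: warn, then back out */
if (WARN_ON(trans->tre_count != 1))
	return -EINVAL;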
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 744406832a77..71ba996096bb 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -51,6 +51,7 @@ enum ipa_flag {
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information
+ * @uc_clocked: true if clock is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
@@ -95,6 +96,7 @@ struct ipa {
__le64 *table_virt;
struct ipa_interrupt *interrupt;
+ bool uc_clocked;
bool uc_loaded;
dma_addr_t reg_addr;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index af44ca41189e..cff51731195a 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
-}
-#ifdef IPA_VALIDATE
+ /* Hashed and non-hashed fields are assumed to be the same size */
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
+
+ /* Valid endpoint numbers must fit in the IP packet init command */
+ BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
+ IPA_ENDPOINT_MAX - 1);
+}
/* Validate a memory region holding a table */
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route, bool ipv6, bool hashed)
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
+ u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+ const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
- u32 offset_max;
- offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
- : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ /* Size must fit in the immediate command field that holds it */
+ if (mem->size > size_max) {
+ dev_err(dev, "%s table region size too large\n", table);
+ dev_err(dev, " (0x%04x > 0x%04x)\n",
+ mem->size, size_max);
+
+ return false;
+ }
+
+ /* Offset must fit in the immediate command field that holds it */
if (mem->offset > offset_max ||
ipa->mem_offset > offset_max - mem->offset) {
- dev_err(dev, "IPv%c %s%s table region offset too large\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter");
+ dev_err(dev, "%s table region offset too large\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, mem->offset, offset_max);
return false;
}
+ /* Entire memory range must fit within IPA-local memory */
if (mem->offset > ipa->mem_size ||
mem->size > ipa->mem_size - mem->offset) {
- dev_err(dev, "IPv%c %s%s table region out of range\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter");
+ dev_err(dev, "%s table region out of range\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
mem->offset, mem->size, ipa->mem_size);
@@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa)
return true;
}
-#endif /* IPA_VALIDATE */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
@@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
- /* assert(endpoint_id <
- field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
-
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
@@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
u16 flags;
/* size and offset must fit in 16 bit fields */
- /* assert(size > 0 && size <= U16_MAX); */
- /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
+ WARN_ON(!size);
+ WARN_ON(size > U16_MAX);
+ WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);
offset += ipa->mem_offset;
@@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
- /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
-
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index b99262281f41..69cd085d427d 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -57,20 +57,16 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
-#ifdef IPA_VALIDATE
-
/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
- * @ipv6: - Whether the table is for IPv6 or IPv4
- * @hashed: - Whether the table is hashed or non-hashed
*
* Return: true if region is valid, false otherwise
*/
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route, bool ipv6, bool hashed);
+ bool route);
/**
* ipa_cmd_data_valid() - Validate command-related configuration is valid
* ipa_cmd_data_valid() - Validate command-related configuration is valid
@@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
*/
bool ipa_cmd_data_valid(struct ipa *ipa);
-#else /* !IPA_VALIDATE */
-
-static inline bool ipa_cmd_table_valid(struct ipa *ipa,
- const struct ipa_mem *mem, bool route,
- bool ipv6, bool hashed)
-{
- return true;
-}
-
-static inline bool ipa_cmd_data_valid(struct ipa *ipa)
-{
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/**
* ipa_cmd_pool_init() - initialize command channel pools
* @channel: AP->IPA command TX GSI channel pointer
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index 9353efbd504f..782f67e3e079 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -105,6 +105,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,6 +129,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -368,18 +370,13 @@ static const struct ipa_mem_data ipa_mem_data = {
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
- .peak_bandwidth = 465000, /* 465 MBps */
- .average_bandwidth = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- {
- .name = "imem",
- .peak_bandwidth = 68570, /* 68.57 MBps */
- .average_bandwidth = 80000, /* 80 MBps (unused?) */
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
},
+ /* Average rate is unused for the next interconnect */
{
.name = "config",
- .peak_bandwidth = 30000, /* 30 MBps */
+ .peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index a99b6478fa3a..db6fda2fe43d 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -114,6 +114,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -137,6 +138,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 798d43e1eb13..6ab928266b5c 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -106,6 +106,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -129,6 +130,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ab02669bae4e..8070d1a1d5df 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -250,17 +250,18 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
/* Suspend is not supported for IPA v4.0+. Delay doesn't work
* correctly on IPA v4.2.
- *
- * if (endpoint->toward_ipa)
- * assert(ipa->version != IPA_VERSION_4.2);
- * else
- * assert(ipa->version < IPA_VERSION_4_0);
*/
+ if (endpoint->toward_ipa)
+ WARN_ON(ipa->version == IPA_VERSION_4_2);
+ else
+ WARN_ON(ipa->version >= IPA_VERSION_4_0);
+
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
- /* Don't bother if it's already in the requested state */
state = !!(val & mask);
+
+ /* Don't bother if it's already in the requested state */
if (suspend_delay != state) {
val ^= mask;
iowrite32(val, ipa->reg_virt + offset);
@@ -273,7 +274,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
- /* assert(endpoint->toward_ipa); */
+ WARN_ON(!endpoint->toward_ipa);
/* Delay mode doesn't work properly for IPA v4.2 */
if (endpoint->ipa->version != IPA_VERSION_4_2)
@@ -287,7 +288,8 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 offset;
u32 val;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
+
offset = ipa_reg_state_aggr_active_offset(ipa->version);
val = ioread32(ipa->reg_virt + offset);
@@ -299,7 +301,8 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
+
iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
@@ -338,7 +341,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
if (endpoint->ipa->version >= IPA_VERSION_4_0)
return enable; /* For IPA v4.0+, no change made */
- /* assert(!endpoint->toward_ipa); */
+ WARN_ON(endpoint->toward_ipa);
suspended = ipa_endpoint_init_ctrl(endpoint, enable);
@@ -1156,7 +1159,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
if (!endpoint->netdev)
return false;
- /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
+ WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+
skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
if (skb) {
/* Reserve the headroom and account for the data */
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c46df0b7c4e5..aa37f03f4557 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -74,21 +74,25 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
iowrite32(mask, ipa->reg_virt + offset);
}
-/* Process all IPA interrupt types that have been signaled */
-static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
+/* IPA IRQ handler is threaded */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
+ struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ u32 pending;
u32 offset;
u32 mask;
+ ipa_clock_get(ipa);
+
/* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
offset = ipa_reg_irq_stts_offset(ipa->version);
- mask = ioread32(ipa->reg_virt + offset);
- while ((mask &= enabled)) {
+ pending = ioread32(ipa->reg_virt + offset);
+ while ((mask = pending & enabled)) {
do {
u32 irq_id = __ffs(mask);
@@ -96,43 +100,20 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, irq_id);
} while (mask);
- mask = ioread32(ipa->reg_virt + offset);
+ pending = ioread32(ipa->reg_virt + offset);
}
-}
-/* Threaded part of the IPA IRQ handler */
-static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
-{
- struct ipa_interrupt *interrupt = dev_id;
-
- ipa_clock_get(interrupt->ipa);
-
- ipa_interrupt_process_all(interrupt);
-
- ipa_clock_put(interrupt->ipa);
-
- return IRQ_HANDLED;
-}
+ /* If any disabled interrupts are pending, clear them */
+ if (pending) {
+ struct device *dev = &ipa->pdev->dev;
-/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */
-static irqreturn_t ipa_isr(int irq, void *dev_id)
-{
- struct ipa_interrupt *interrupt = dev_id;
- struct ipa *ipa = interrupt->ipa;
- u32 offset;
- u32 mask;
-
- offset = ipa_reg_irq_stts_offset(ipa->version);
- mask = ioread32(ipa->reg_virt + offset);
- if (mask & interrupt->enabled)
- return IRQ_WAKE_THREAD;
-
- /* Nothing in the mask was supposed to cause an interrupt */
- offset = ipa_reg_irq_clr_offset(ipa->version);
- iowrite32(mask, ipa->reg_virt + offset);
+ dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
+ pending);
+ offset = ipa_reg_irq_clr_offset(ipa->version);
+ iowrite32(pending, ipa->reg_virt + offset);
+ }
- dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
- __func__, mask);
+ ipa_clock_put(ipa);
return IRQ_HANDLED;
}
@@ -146,7 +127,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 offset;
u32 val;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
@@ -206,7 +187,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 offset;
- /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
@@ -222,7 +204,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
struct ipa *ipa = interrupt->ipa;
u32 offset;
- /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
offset = ipa_reg_irq_en_offset(ipa->version);
@@ -231,8 +214,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
interrupt->handler[ipa_irq] = NULL;
}
-/* Set up the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
+/* Configure the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
@@ -258,7 +241,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
offset = ipa_reg_irq_en_offset(ipa->version);
iowrite32(0, ipa->reg_virt + offset);
- ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
@@ -281,8 +264,8 @@ err_kfree:
return ERR_PTR(ret);
}
-/* Tear down the IPA interrupt framework */
-void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
+/* Inverse of ipa_interrupt_config() */
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
int ret;
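With the hard-IRQ half (ipa_isr) removed, ipa_interrupt_config() passes a NULL primary handler to request_threaded_irq(); together with IRQF_ONESHOT, which is required for a threaded-only handler, the core installs its default primary handler that simply wakes the thread, and every register access now happens in the clock-holding thread function. A minimal sketch of that registration pattern (foo_* names and foo_handle_pending() are placeholders):

static irqreturn_t foo_isr_thread(int irq, void *dev_id)
{
	struct foo_ctx *ctx = dev_id;

	/* runs in process context, so clock/lock operations that sleep are fine */
	foo_handle_pending(ctx);

	return IRQ_HANDLED;
}

	/* NULL hard handler: the core only wakes the thread */
	ret = request_threaded_irq(irq, NULL, foo_isr_thread, IRQF_ONESHOT,
				   "foo", ctx);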
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index d5c486a6800d..231390cea52a 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -86,17 +86,17 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
/**
- * ipa_interrupt_setup() - Set up the IPA interrupt framework
+ * ipa_interrupt_config() - Configure the IPA interrupt framework
* @ipa: IPA pointer
*
* Return: Pointer to IPA SMP2P info, or a pointer-coded error
*/
-struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
/**
- * ipa_interrupt_teardown() - Tear down the IPA interrupt framework
+ * ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_teardown(struct ipa_interrupt *interrupt);
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 9810c61a0320..2e728d4914c8 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -124,19 +124,12 @@ int ipa_setup(struct ipa *ipa)
if (ret)
return ret;
- ipa->interrupt = ipa_interrupt_setup(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- goto err_gsi_teardown;
- }
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
ipa_suspend_handler);
- ipa_uc_setup(ipa);
-
ret = device_init_wakeup(dev, true);
if (ret)
- goto err_uc_teardown;
+ goto err_interrupt_remove;
ipa_endpoint_setup(ipa);
@@ -167,7 +160,7 @@ int ipa_setup(struct ipa *ipa)
ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
/* We're all set. Now prepare for communication with the modem */
- ret = ipa_modem_setup(ipa);
+ ret = ipa_qmi_setup(ipa);
if (ret)
goto err_default_route_clear;
@@ -185,11 +178,8 @@ err_command_disable:
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
(void)device_init_wakeup(dev, false);
-err_uc_teardown:
- ipa_uc_teardown(ipa);
+err_interrupt_remove:
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
- ipa_interrupt_teardown(ipa->interrupt);
-err_gsi_teardown:
gsi_teardown(&ipa->gsi);
return ret;
@@ -204,7 +194,10 @@ static void ipa_teardown(struct ipa *ipa)
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- ipa_modem_teardown(ipa);
+ /* We're going to tear everything down, as if setup never completed */
+ ipa->setup_complete = false;
+
+ ipa_qmi_teardown(ipa);
ipa_endpoint_default_route_clear(ipa);
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_endpoint_disable_one(exception_endpoint);
@@ -212,9 +205,7 @@ static void ipa_teardown(struct ipa *ipa)
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
(void)device_init_wakeup(&ipa->pdev->dev, false);
- ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
- ipa_interrupt_teardown(ipa->interrupt);
gsi_teardown(&ipa->gsi);
}
@@ -253,9 +244,6 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
const struct ipa_qsb_data *data1;
u32 val;
- /* assert(data->qsb_count > 0); */
- /* assert(data->qsb_count < 3); */
-
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
if (data->qsb_count > 1)
@@ -293,7 +281,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
*/
static u32 ipa_aggr_granularity_val(u32 usec)
{
- /* assert(usec != 0); */
+ WARN_ON(!usec);
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
@@ -471,31 +459,44 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
ipa_hardware_config(ipa, data);
- ret = ipa_endpoint_config(ipa);
+ ret = ipa_mem_config(ipa);
if (ret)
goto err_hardware_deconfig;
- ret = ipa_mem_config(ipa);
+ ipa->interrupt = ipa_interrupt_config(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ ipa->interrupt = NULL;
+ goto err_mem_deconfig;
+ }
+
+ ipa_uc_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
if (ret)
- goto err_endpoint_deconfig;
+ goto err_interrupt_deconfig;
ipa_table_config(ipa); /* No deconfig required */
/* Assign resource limitation to each group; no deconfig required */
ret = ipa_resource_config(ipa, data->resource_data);
if (ret)
- goto err_mem_deconfig;
+ goto err_endpoint_deconfig;
ret = ipa_modem_config(ipa);
if (ret)
- goto err_mem_deconfig;
+ goto err_endpoint_deconfig;
return 0;
-err_mem_deconfig:
- ipa_mem_deconfig(ipa);
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
+err_interrupt_deconfig:
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+err_mem_deconfig:
+ ipa_mem_deconfig(ipa);
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
@@ -510,8 +511,11 @@ err_hardware_deconfig:
static void ipa_deconfig(struct ipa *ipa)
{
ipa_modem_deconfig(ipa);
- ipa_mem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+ ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
}
@@ -612,7 +616,6 @@ MODULE_DEVICE_TABLE(of, ipa_match);
* */
static void ipa_validate_build(void)
{
-#ifdef IPA_VALIDATE
/* At one time we assumed a 64-bit build, allowing some do_div()
* calls to be replaced by simple division or modulo operations.
* We currently only perform divide and modulo operations on u32,
@@ -646,7 +649,6 @@ static void ipa_validate_build(void)
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
field_max(AGGR_GRANULARITY_FMASK));
-#endif /* IPA_VALIDATE */
}
static bool ipa_version_valid(enum ipa_version version)
@@ -771,9 +773,12 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_table_exit;
+ /* The clock needs to be active for config and setup */
+ ipa_clock_get(ipa);
+
ret = ipa_config(ipa, data);
if (ret)
- goto err_modem_exit;
+ goto err_clock_put; /* Error */
dev_info(dev, "IPA driver initialized");
@@ -782,7 +787,7 @@ static int ipa_probe(struct platform_device *pdev)
* we're done here.
*/
if (modem_init)
- return 0;
+ goto out_clock_put; /* Done; no error */
/* Otherwise we need to load the firmware and have Trust Zone validate
* and install it. If that succeeds we can proceed with setup.
@@ -795,11 +800,15 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_deconfig;
+out_clock_put:
+ ipa_clock_put(ipa);
+
return 0;
err_deconfig:
ipa_deconfig(ipa);
-err_modem_exit:
+err_clock_put:
+ ipa_clock_put(ipa);
ipa_modem_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
@@ -825,6 +834,8 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa_clock *clock = ipa->clock;
int ret;
+ ipa_clock_get(ipa);
+
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
/* If starting or stopping is in progress, try once more */
@@ -839,6 +850,9 @@ static int ipa_remove(struct platform_device *pdev)
}
ipa_deconfig(ipa);
+
+ ipa_clock_put(ipa);
+
ipa_modem_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
@@ -874,13 +888,11 @@ static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
- /* When a suspended RX endpoint has a packet ready to receive, we
- * get an IPA SUSPEND interrupt. We trigger a system resume in
- * that case, but only on the first such interrupt since suspend.
- */
- __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
-
- ipa_endpoint_suspend(ipa);
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
+ ipa_endpoint_suspend(ipa);
+ }
ipa_clock_put(ipa);
@@ -906,7 +918,9 @@ static int ipa_resume(struct device *dev)
*/
ipa_clock_get(ipa);
- ipa_endpoint_resume(ipa);
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete)
+ ipa_endpoint_resume(ipa);
return 0;
}
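
The reworked error path in ipa_config() above follows the usual stacked-unwind idiom: each layer that is configured successfully gets its own error label, and a failure jumps to the label that undoes everything configured so far, in reverse order. A minimal user-space sketch of that pattern (the init_a()/init_b()/init_c() helpers are hypothetical stand-ins, not part of the driver):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for ipa_mem_config(), ipa_interrupt_config(), ... */
static bool init_a(void) { puts("A configured"); return true; }
static bool init_b(void) { puts("B configured"); return true; }
static bool init_c(void) { puts("C configured"); return false; } /* fails */

static void undo_a(void) { puts("A deconfigured"); }
static void undo_b(void) { puts("B deconfigured"); }

static int config(void)
{
	if (!init_a())
		return -1;
	if (!init_b())
		goto err_undo_a;
	if (!init_c())
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();	/* undone in reverse order of configuration */
err_undo_a:
	undo_a();
	return -1;
}

int main(void)
{
	return config() ? 1 : 0;
}
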
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index af9aedbde717..4ea8287e9d23 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -19,6 +19,8 @@
#include "ipa_modem.h"
#include "ipa_smp2p.h"
#include "ipa_qmi.h"
+#include "ipa_uc.h"
+#include "ipa_clock.h"
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
@@ -43,9 +45,12 @@ static int ipa_open(struct net_device *netdev)
struct ipa *ipa = priv->ipa;
int ret;
+ ipa_clock_get(ipa);
+
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
if (ret)
- return ret;
+ goto err_clock_put;
+
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
if (ret)
goto err_disable_tx;
@@ -56,6 +61,8 @@ static int ipa_open(struct net_device *netdev)
err_disable_tx:
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+err_clock_put:
+ ipa_clock_put(ipa);
return ret;
}
@@ -71,6 +78,8 @@ static int ipa_stop(struct net_device *netdev)
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_clock_put(ipa);
+
return 0;
}
@@ -278,6 +287,8 @@ static void ipa_modem_crashed(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
+ ipa_clock_get(ipa);
+
ipa_endpoint_modem_pause_all(ipa, true);
ipa_endpoint_modem_hol_block_clear_all(ipa);
@@ -302,6 +313,8 @@ static void ipa_modem_crashed(struct ipa *ipa)
ret = ipa_mem_zero_modem(ipa);
if (ret)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+
+ ipa_clock_put(ipa);
}
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
@@ -314,6 +327,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
dev_info(dev, "received modem starting event\n");
+ ipa_uc_clock(ipa);
ipa_smp2p_notify_reset(ipa);
break;
@@ -377,13 +391,3 @@ void ipa_modem_deconfig(struct ipa *ipa)
ipa->notifier = NULL;
memset(&ipa->nb, 0, sizeof(ipa->nb));
}
-
-int ipa_modem_setup(struct ipa *ipa)
-{
- return ipa_qmi_setup(ipa);
-}
-
-void ipa_modem_teardown(struct ipa *ipa)
-{
- ipa_qmi_teardown(ipa);
-}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index 2de3e216d1d4..5e6e3d234454 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -7,7 +7,6 @@
#define _IPA_MODEM_H_
struct ipa;
-struct ipa_endpoint;
struct net_device;
struct sk_buff;
@@ -25,7 +24,4 @@ void ipa_modem_exit(struct ipa *ipa);
int ipa_modem_config(struct ipa *ipa);
void ipa_modem_deconfig(struct ipa *ipa);
-int ipa_modem_setup(struct ipa *ipa);
-void ipa_modem_teardown(struct ipa *ipa);
-
#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 4661105ce7ab..90f3aec55b36 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -467,10 +467,7 @@ static const struct qmi_ops ipa_client_ops = {
.new_server = ipa_client_new_server,
};
-/* This is called by ipa_setup(). We can be informed via remoteproc that
- * the modem has shut down, in which case this function will be called
- * again to prepare for it coming back up again.
- */
+/* Set up for QMI message exchange */
int ipa_qmi_setup(struct ipa *ipa)
{
struct ipa_qmi *ipa_qmi = &ipa->qmi;
@@ -526,6 +523,7 @@ err_server_handle_release:
return ret;
}
+/* Tear down IPA QMI handles */
void ipa_qmi_teardown(struct ipa *ipa)
{
cancel_work_sync(&ipa->qmi.init_driver_work);
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index b6f2055d35a6..856ef629ccc8 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -39,7 +39,26 @@ struct ipa_qmi {
bool indication_sent;
};
+/**
+ * ipa_qmi_setup() - Set up for QMI message exchange
+ * @ipa: IPA pointer
+ *
+ * This is called at the end of ipa_setup(), to prepare for the exchange
+ * of QMI messages that perform a "handshake" between the AP and modem.
+ * When the modem QMI server announces its presence, an AP request message
+ * supplies operating parameters to be used by the modem, and the modem
+ * acknowledges receipt of those parameters. The modem will not touch the
+ * IPA hardware until this handshake is complete.
+ *
+ * If the modem crashes (or shuts down) a new handshake begins when the
+ * modem's QMI server is started again.
+ */
int ipa_qmi_setup(struct ipa *ipa);
+
+/**
+ * ipa_qmi_teardown() - Tear down IPA QMI handles
+ * @ipa: IPA pointer
+ */
void ipa_qmi_teardown(struct ipa *ipa);
#endif /* !_IPA_QMI_H_ */
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index b89dec5865a5..a5b355384d4a 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -99,7 +99,7 @@ struct ipa;
static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
u32 mask)
{
- /* assert(version >= IPA_VERSION_4_0); */
+ WARN_ON(version < IPA_VERSION_4_0);
if (version < IPA_VERSION_4_9)
return u32_encode_bits(mask, GENMASK(20, 17));
@@ -116,7 +116,7 @@ static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
{
u32 val = enable ? 1 : 0;
- /* assert(version >= IPA_VERSION_4_5); */
+ WARN_ON(version < IPA_VERSION_4_5);
if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
return u32_encode_bits(val, GENMASK(21, 21));
@@ -409,7 +409,7 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
val = u32_encode_bits(size, HDR_LEN_FMASK);
if (version < IPA_VERSION_4_5) {
- /* ipa_assert(header_size == size); */
+ WARN_ON(header_size != size);
return val;
}
@@ -429,7 +429,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
if (version < IPA_VERSION_4_5) {
- /* ipa_assert(offset == off); */
+ WARN_ON(offset != off);
return val;
}
@@ -812,7 +812,7 @@ ipa_reg_irq_suspend_info_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
{
- /* assert(version != IPA_VERSION_3_0); */
+ WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003034 + 0x1000 * ee;
@@ -830,7 +830,7 @@ ipa_reg_irq_suspend_en_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
{
- /* assert(version != IPA_VERSION_3_0); */
+ WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003038 + 0x1000 * ee;
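
The assert-comment conversions in ipa_reg.h above rely on WARN_ON(), which, unlike the commented-out asserts they replace, evaluates its condition at runtime, emits a warning (with a backtrace in the kernel) when the condition is true, and returns the condition so it can be used inside an if(). A rough user-space analogue, only to illustrate those semantics:

#include <stdio.h>

/* Simplified approximation of the kernel's WARN_ON(): report when the
 * condition is true and return it, so it can gate an early exit.
 * (Uses a GCC/Clang statement expression, as the kernel macro does.)
 */
#define WARN_ON(cond) ({						\
	int __ret = !!(cond);						\
	if (__ret)							\
		fprintf(stderr, "WARNING: %s:%d: %s\n",			\
			__FILE__, __LINE__, #cond);			\
	__ret;								\
})

int main(void)
{
	unsigned int version = 3;

	/* Mirrors the ipa_reg.h checks: warn if called for IPA v3.0 */
	if (WARN_ON(version == 3))
		return 1;
	return 0;
}
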
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 3b2dc216d3a6..e3da95d69409 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -29,7 +29,6 @@
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
-#ifdef IPA_VALIDATION
u32 group_count;
u32 i;
u32 j;
@@ -65,7 +64,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
-#endif /* !IPA_VALIDATION */
+
return true;
}
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 93270e50b6b3..0d15438a79e2 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -156,11 +156,16 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
if (!smp2p->disabled) {
int ret;
+ /* The clock needs to be active for setup */
+ ipa_clock_get(smp2p->ipa);
+
ret = ipa_setup(smp2p->ipa);
if (ret)
dev_err(&smp2p->ipa->pdev->dev,
"error %d from ipa_setup()\n", ret);
smp2p->disabled = true;
+
+ ipa_clock_put(smp2p->ipa);
}
mutex_unlock(&smp2p->mutex);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index c617a9156f26..2324e1b93e37 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -120,8 +120,6 @@
*/
#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
-#ifdef IPA_VALIDATE
-
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
@@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
- if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ if (!ipa_cmd_table_valid(ipa, mem, route))
return false;
/* mem->size >= size is sufficient, but we'll demand more */
@@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
return true;
/* Hashed table regions can be zero size if hashing is not supported */
- if (hashed && !mem->size)
+ if (ipa_table_hash_support(ipa) && !mem->size)
return true;
dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
@@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa)
{
bool valid;
- valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true);
+ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
+
+ if (!ipa_table_hash_support(ipa))
+ return valid;
+
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
+ false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
+ false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
+ true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
+ true);
return valid;
}
@@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
return true;
}
-#else /* !IPA_VALIDATE */
-static void ipa_table_validate_build(void)
-
-{
-}
-
-#endif /* !IPA_VALIDATE */
-
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -233,7 +231,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
if (!count)
return 0;
-/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
+ WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
/* Skip over the zero rule and possibly the filter mask */
skip = filter_mask ? 1 : 2;
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 1e2be9fce2f8..b6a9a0d79d68 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -16,8 +16,6 @@ struct ipa;
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
-#ifdef IPA_VALIDATE
-
/**
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
@@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa);
*/
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
-#else /* !IPA_VALIDATE */
-
-static inline bool ipa_table_valid(struct ipa *ipa)
-{
- return true;
-}
-
-static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
-{
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/**
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index fd9219863234..f88ee02457d4 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -131,7 +131,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
else if (shared->event != IPA_UC_EVENT_LOG_INFO)
- dev_err(dev, "unsupported microcontroller event %hhu\n",
+ dev_err(dev, "unsupported microcontroller event %u\n",
shared->event);
/* The LOG_INFO event can be safely ignored */
}
@@ -140,55 +140,65 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
* should only receive responses from the microcontroller when it has
* sent it a request message.
*
- * We can drop the clock reference taken in ipa_uc_setup() once we
+ * We can drop the clock reference taken in ipa_uc_clock() once we
* know the microcontroller has finished its initialization.
*/
switch (shared->response) {
case IPA_UC_RESPONSE_INIT_COMPLETED:
- ipa->uc_loaded = true;
- ipa_clock_put(ipa);
+ if (ipa->uc_clocked) {
+ ipa->uc_loaded = true;
+ ipa_clock_put(ipa);
+ ipa->uc_clocked = false;
+ } else {
+ dev_warn(dev, "unexpected init_completed response\n");
+ }
break;
default:
- dev_warn(&ipa->pdev->dev,
- "unsupported microcontroller response %hhu\n",
+ dev_warn(dev, "unsupported microcontroller response %u\n",
shared->response);
break;
}
}
-/* ipa_uc_setup() - Set up the microcontroller */
-void ipa_uc_setup(struct ipa *ipa)
+/* Configure the IPA microcontroller subsystem */
+void ipa_uc_config(struct ipa *ipa)
{
- /* The microcontroller needs the IPA clock running until it has
- * completed its initialization. It signals this by sending an
- * INIT_COMPLETED response message to the AP. This could occur after
- * we have finished doing the rest of the IPA initialization, so we
- * need to take an extra "proxy" reference, and hold it until we've
- * received that signal. (This reference is dropped in
- * ipa_uc_response_hdlr(), above.)
- */
- ipa_clock_get(ipa);
-
+ ipa->uc_clocked = false;
ipa->uc_loaded = false;
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
}
-/* Inverse of ipa_uc_setup() */
-void ipa_uc_teardown(struct ipa *ipa)
+/* Inverse of ipa_uc_config() */
+void ipa_uc_deconfig(struct ipa *ipa)
{
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
- if (!ipa->uc_loaded)
+ if (ipa->uc_clocked)
ipa_clock_put(ipa);
}
+/* Take a proxy clock reference for the microcontroller */
+void ipa_uc_clock(struct ipa *ipa)
+{
+ static bool already;
+
+ if (already)
+ return;
+ already = true; /* Only do this on first boot */
+
+ /* This clock reference is dropped in ipa_uc_response_hdlr() above */
+ ipa_clock_get(ipa);
+ ipa->uc_clocked = true;
+}
+
/* Send a command to the microcontroller */
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index e8510899a3f0..14e4e1115aa7 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -9,16 +9,30 @@
struct ipa;
/**
- * ipa_uc_setup() - set up the IPA microcontroller subsystem
+ * ipa_uc_config() - Configure the IPA microcontroller subsystem
* @ipa: IPA pointer
*/
-void ipa_uc_setup(struct ipa *ipa);
+void ipa_uc_config(struct ipa *ipa);
/**
- * ipa_uc_teardown() - inverse of ipa_uc_setup()
+ * ipa_uc_deconfig() - Inverse of ipa_uc_config()
* @ipa: IPA pointer
*/
-void ipa_uc_teardown(struct ipa *ipa);
+void ipa_uc_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_uc_clock() - Take a proxy clock reference for the microcontroller
+ * @ipa: IPA pointer
+ *
+ * The first time the modem boots, it loads firmware for and starts the
+ * IPA-resident microcontroller. The microcontroller signals that it
+ * has completed its initialization by sending an INIT_COMPLETED response
+ * message to the AP. The AP must ensure the IPA core clock is operating
+ * until it receives this message, and to do so we take a "proxy" clock
+ * reference on its behalf here. Once we receive the INIT_COMPLETED
+ * message (in ipa_uc_response_hdlr()) we drop this clock reference.
+ */
+void ipa_uc_clock(struct ipa *ipa);
/**
* ipa_uc_panic_notifier()
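
The ipa_uc_clock() kerneldoc above describes taking a "proxy" clock reference when the modem first boots and dropping it once the microcontroller reports INIT_COMPLETED. A small standalone sketch of that reference-counting pattern (plain counters stand in for the real ipa_clock_get()/ipa_clock_put(); the names are illustrative only):

#include <stdio.h>
#include <stdbool.h>

static int clock_refs;		/* stands in for the IPA core clock refcount */
static bool uc_clocked;		/* proxy reference currently held? */

static void clock_get(void) { clock_refs++; }
static void clock_put(void) { clock_refs--; }

/* Modem is booting: hold the clock on the microcontroller's behalf,
 * but only the first time (mirrors the "already" guard in ipa_uc_clock()).
 */
static void uc_clock(void)
{
	static bool already;

	if (already)
		return;
	already = true;

	clock_get();
	uc_clocked = true;
}

/* Microcontroller reported INIT_COMPLETED: drop the proxy reference */
static void uc_init_completed(void)
{
	if (uc_clocked) {
		clock_put();
		uc_clocked = false;
	}
}

int main(void)
{
	uc_clock();		/* first modem boot */
	uc_clock();		/* later boots take no extra reference */
	uc_init_completed();
	printf("clock refs held: %d\n", clock_refs);	/* prints 0 */
	return 0;
}
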
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index a707502a0c0f..c0b21a5580d5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -732,6 +732,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
port = ipvlan_port_get_rtnl(dev);
switch (event) {
+ case NETDEV_UP:
case NETDEV_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
netif_stacked_transfer_operstate(ipvlan->phy_dev,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 80de9768ecd4..35f46ad040b0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -829,7 +829,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int macvlan_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = macvlan_dev_real_dev(dev);
const struct net_device_ops *ops = real_dev->netdev_ops;
@@ -845,8 +845,8 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
fallthrough;
case SIOCGHWTSTAMP:
- if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
- err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ if (netif_device_present(real_dev) && ops->ndo_eth_ioctl)
+ err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
break;
}
@@ -1151,7 +1151,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_stop = macvlan_stop,
.ndo_start_xmit = macvlan_start_xmit,
.ndo_change_mtu = macvlan_change_mtu,
- .ndo_do_ioctl = macvlan_do_ioctl,
+ .ndo_eth_ioctl = macvlan_eth_ioctl,
.ndo_fix_features = macvlan_fix_features,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
new file mode 100644
index 000000000000..d8f966cedc89
--- /dev/null
+++ b/drivers/net/mctp/Kconfig
@@ -0,0 +1,8 @@
+
+if MCTP
+
+menu "MCTP Device Drivers"
+
+endmenu
+
+endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/net/mctp/Makefile
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index e60e38c1f09d..0cc7dcd0ff96 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -205,11 +205,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
mhi_netdev->skbagg_head = NULL;
}
- u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
- u64_stats_inc(&mhi_netdev->stats.rx_packets);
- u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
- u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
-
switch (skb->data[0] & 0xf0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
@@ -222,10 +217,15 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
break;
}
- if (proto && proto->rx)
+ if (proto && proto->rx) {
proto->rx(mhi_netdev, skb);
- else
+ } else {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
netif_rx(skb);
+ }
}
/* Refill if RX buffers queue becomes low */
@@ -329,6 +329,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
mhi_netdev->mdev = mhi_dev;
mhi_netdev->skbagg_head = NULL;
mhi_netdev->proto = info->proto;
+ mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c
index bf1ad863237d..761d90b28ee6 100644
--- a/drivers/net/mhi/proto_mbim.c
+++ b/drivers/net/mhi/proto_mbim.c
@@ -211,6 +211,10 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
continue;
}
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, skbn->len);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
netif_rx(skbn);
}
next_ndp:
@@ -292,7 +296,9 @@ static int mbim_init(struct mhi_net_dev *mhi_netdev)
ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->mtu = MHI_MBIM_DEFAULT_MTU;
- mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU;
+
+ if (!mhi_netdev->mru)
+ mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU;
return 0;
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index ccec29970d5b..ff01e5bdc72e 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -262,29 +262,31 @@ static struct device_type nsim_bus_dev_type = {
};
static struct nsim_bus_dev *
-nsim_bus_dev_new(unsigned int id, unsigned int port_count);
+nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues);
static ssize_t
new_device_store(struct bus_type *bus, const char *buf, size_t count)
{
+ unsigned int id, port_count, num_queues;
struct nsim_bus_dev *nsim_bus_dev;
- unsigned int port_count;
- unsigned int id;
int err;
- err = sscanf(buf, "%u %u", &id, &port_count);
+ err = sscanf(buf, "%u %u %u", &id, &port_count, &num_queues);
switch (err) {
case 1:
port_count = 1;
fallthrough;
case 2:
+ num_queues = 1;
+ fallthrough;
+ case 3:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
return -EINVAL;
}
break;
default:
- pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+ pr_err("Format for adding new device is \"id port_count num_queues\" (uint uint unit).\n");
return -EINVAL;
}
@@ -295,7 +297,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
goto err;
}
- nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count, num_queues);
if (IS_ERR(nsim_bus_dev)) {
err = PTR_ERR(nsim_bus_dev);
goto err;
@@ -397,7 +399,7 @@ static struct bus_type nsim_bus = {
#define NSIM_BUS_DEV_MAX_VFS 4
static struct nsim_bus_dev *
-nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues)
{
struct nsim_bus_dev *nsim_bus_dev;
int err;
@@ -413,6 +415,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->num_queues = num_queues;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
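
The extended new_device_store() parser above accepts one, two or three fields and defaults the missing ones by falling through the switch on sscanf()'s return value. A standalone sketch of the same parsing logic (parse_new_device() is a hypothetical helper, not part of netdevsim):

#include <stdio.h>

/* Parse "id [port_count [num_queues]]" the way new_device_store() does:
 * sscanf() returns how many fields matched, and the switch falls through
 * to supply defaults for the fields that were not given.
 */
static int parse_new_device(const char *buf, unsigned int *id,
			    unsigned int *port_count, unsigned int *num_queues)
{
	int n = sscanf(buf, "%u %u %u", id, port_count, num_queues);

	switch (n) {
	case 1:
		*port_count = 1;
		/* fall through */
	case 2:
		*num_queues = 1;
		/* fall through */
	case 3:
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	unsigned int id, ports, queues;

	if (!parse_new_device("5 2", &id, &ports, &queues))
		printf("id=%u port_count=%u num_queues=%u\n", id, ports, queues);
	return 0;	/* prints: id=5 port_count=2 num_queues=1 */
}
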
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 6348307bfa84..d538a39d4225 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1431,10 +1431,10 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
struct devlink *devlink;
int err;
- devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ devlink = devlink_alloc_ns(&nsim_dev_devlink_ops, sizeof(*nsim_dev),
+ nsim_bus_dev->initial_net);
if (!devlink)
return -ENOMEM;
- devlink_net_set(devlink, nsim_bus_dev->initial_net);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index c3aeb15843e2..50572e0f1f52 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -347,7 +347,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
struct netdevsim *ns;
int err;
- dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup);
+ dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup,
+ nsim_dev->nsim_bus_dev->num_queues);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -392,7 +393,8 @@ void nsim_destroy(struct netdevsim *ns)
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index ae462957dcee..1c20bcbd9d91 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -352,6 +352,7 @@ struct nsim_bus_dev {
struct device dev;
struct list_head list;
unsigned int port_count;
+ unsigned int num_queues; /* Number of queues for each port on this bus */
struct net *initial_net; /* Purpose of this is to carry net pointer
* during the probe time only.
*/
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c56f703ae998..7564ae0c1997 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -207,6 +207,12 @@ config MARVELL_88X2222_PHY
Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet
Transceiver.
+config MAXLINEAR_GPHY
+ tristate "Maxlinear Ethernet PHYs"
+ help
+ Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
+ GPY241, GPY245 PHYs.
+
config MEDIATEK_GE_PHY
tristate "MediaTek Gigabit Ethernet PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 172bb193ae6a..b2728d00fc9a 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
+obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5d62b85a4024..bdac087058b2 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -532,12 +532,6 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
-static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id)
-{
- return (phydev->phy_id & phydev->drv->phy_id_mask)
- == (phy_id & phydev->drv->phy_id_mask);
-}
-
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -602,8 +596,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
* to the AR8030 so there might be a good chance it works on
* the AR8030 too.
*/
- if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
- at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8030_PHY_ID ||
+ phydev->drv->phy_id == ATH8035_PHY_ID) {
priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
}
@@ -631,7 +625,7 @@ static int at803x_parse_dt(struct phy_device *phydev)
/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
* options.
*/
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
if (of_property_read_bool(node, "qca,keep-pll-enabled"))
priv->flags |= AT803X_KEEP_PLL_ENABLED;
@@ -676,7 +670,7 @@ static int at803x_probe(struct phy_device *phydev)
* Switch to the copper page, as otherwise we read
* the PHY capabilities from the fiber side.
*/
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
phy_lock_mdio_bus(phydev);
ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
phy_unlock_mdio_bus(phydev);
@@ -709,7 +703,7 @@ static int at803x_get_features(struct phy_device *phydev)
if (err)
return err;
- if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID))
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
return 0;
/* AR8031/AR8033 have different status registers
@@ -820,7 +814,7 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
ret = at8031_pll_config(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index f7a2ec150e54..211b5476a6f5 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -326,11 +326,9 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
static int dp8382x_disable_wol(struct phy_device *phydev)
{
- int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
- DP83822_WOL_SECURE_ON;
-
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_WOL_CFG, value);
+ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
+ DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+ DP83822_WOL_SECURE_ON);
}
static int dp83822_read_status(struct phy_device *phydev)
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index d453ec016168..3c032868ef04 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -8,11 +8,16 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
+#define XWAY_MDIO_MIICTRL 0x17 /* mii control */
#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
#define XWAY_MDIO_LED 0x1B /* led control */
+#define XWAY_MDIO_MIICTRL_RXSKEW_MASK GENMASK(14, 12)
+#define XWAY_MDIO_MIICTRL_TXSKEW_MASK GENMASK(10, 8)
+
/* bit 15:12 are reserved */
#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */
#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */
@@ -157,6 +162,73 @@
#define PHY_ID_PHY11G_VR9_1_2 0xD565A409
#define PHY_ID_PHY22F_VR9_1_2 0xD565A419
+static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500,
+ 3000, 3500};
+
+static int xway_gphy_rgmii_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ unsigned int delay_size = ARRAY_SIZE(xway_internal_delay);
+ s32 int_delay;
+ int val = 0;
+
+ if (!phy_interface_is_rgmii(phydev))
+ return 0;
+
+ /* Existing behavior was to use default pin strapping delay in rgmii
+ * mode, but rgmii should have meant no delay. Warn existing users,
+ * but do not change anything at the moment.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ u16 txskew, rxskew;
+
+ val = phy_read(phydev, XWAY_MDIO_MIICTRL);
+ if (val < 0)
+ return val;
+
+ txskew = FIELD_GET(XWAY_MDIO_MIICTRL_TXSKEW_MASK, val);
+ rxskew = FIELD_GET(XWAY_MDIO_MIICTRL_RXSKEW_MASK, val);
+
+ if (txskew > 0 || rxskew > 0)
+ phydev_warn(phydev,
+ "PHY has delays (e.g. via pin strapping), but phy-mode = 'rgmii'\n"
+ "Should be 'rgmii-id' to use internal delays txskew:%d ps rxskew:%d ps\n",
+ xway_internal_delay[txskew],
+ xway_internal_delay[rxskew]);
+ return 0;
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ int_delay = phy_get_internal_delay(phydev, dev,
+ xway_internal_delay,
+ delay_size, true);
+
+ /* if rx-internal-delay-ps is missing, use default of 2.0 ns */
+ if (int_delay < 0)
+ int_delay = 4; /* 2000 ps */
+
+ val |= FIELD_PREP(XWAY_MDIO_MIICTRL_RXSKEW_MASK, int_delay);
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ int_delay = phy_get_internal_delay(phydev, dev,
+ xway_internal_delay,
+ delay_size, false);
+
+ /* if tx-internal-delay-ps is missing, use default of 2.0 ns */
+ if (int_delay < 0)
+ int_delay = 4; /* 2000 ps */
+
+ val |= FIELD_PREP(XWAY_MDIO_MIICTRL_TXSKEW_MASK, int_delay);
+ }
+
+ return phy_modify(phydev, XWAY_MDIO_MIICTRL,
+ XWAY_MDIO_MIICTRL_RXSKEW_MASK |
+ XWAY_MDIO_MIICTRL_TXSKEW_MASK, val);
+}
+
static int xway_gphy_config_init(struct phy_device *phydev)
{
int err;
@@ -204,6 +276,10 @@ static int xway_gphy_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh);
phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl);
+ err = xway_gphy_rgmii_init(phydev);
+ if (err)
+ return err;
+
return 0;
}
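
xway_gphy_rgmii_init() above maps a delay in picoseconds to an index into xway_internal_delay[] and packs that index into the RX/TX skew fields of XWAY_MDIO_MIICTRL. A standalone sketch of the mapping and packing; the nearest-match lookup is a simplified stand-in for phy_get_internal_delay(), which in the driver also reads the requested delay from the device tree:

#include <stdio.h>

#define RXSKEW_SHIFT	12	/* GENMASK(14, 12) in the driver */
#define TXSKEW_SHIFT	8	/* GENMASK(10, 8) in the driver */

static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500,
					  3000, 3500};

/* Return the index of the first table entry >= the requested delay (ps) */
static int delay_index(int delay_ps)
{
	unsigned int i;
	unsigned int n = sizeof(xway_internal_delay) /
			 sizeof(xway_internal_delay[0]);

	for (i = 0; i < n; i++)
		if (xway_internal_delay[i] >= delay_ps)
			return i;
	return -1;
}

int main(void)
{
	int rx = delay_index(2000);	/* rx-internal-delay-ps = 2000 -> 4 */
	int tx = delay_index(2000);
	unsigned int miictrl = (rx << RXSKEW_SHIFT) | (tx << TXSKEW_SHIFT);

	printf("MIICTRL skew bits: 0x%04x\n", miictrl);	/* prints 0x4400 */
	return 0;
}
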
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 53a433442803..0b7cae118ad7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -28,6 +28,7 @@
#include <linux/marvell_phy.h>
#include <linux/phy.h>
#include <linux/sfp.h>
+#include <linux/netdevice.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
@@ -104,6 +105,16 @@ enum {
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
+ MV_V2_PORT_INTR_STS = 0xf040,
+ MV_V2_PORT_INTR_MASK = 0xf043,
+ MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
+ MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
+ MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
+ MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
+ /* Wake on LAN registers */
+ MV_V2_WOL_CTRL = 0xf06e,
+ MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -1020,6 +1031,80 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
return mv211x_match_phy_device(phydev, false);
}
+static void mv3110_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_WOL_CTRL);
+ if (ret < 0)
+ return;
+
+ if (ret & MV_V2_WOL_CTRL_MAGIC_PKT_EN)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int mv3110_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Enable the WOL interrupt */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_PORT_INTR_MASK,
+ MV_V2_PORT_INTR_STS_WOL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Store the device address for the magic packet */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD2,
+ ((phydev->attached_dev->dev_addr[5] << 8) |
+ phydev->attached_dev->dev_addr[4]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD1,
+ ((phydev->attached_dev->dev_addr[3] << 8) |
+ phydev->attached_dev->dev_addr[2]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD0,
+ ((phydev->attached_dev->dev_addr[1] << 8) |
+ phydev->attached_dev->dev_addr[0]));
+ if (ret < 0)
+ return ret;
+
+ /* Clear WOL status and enable magic packet matching */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN |
+ MV_V2_WOL_CTRL_CLEAR_STS);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable magic packet matching & reset WOL status bit */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN,
+ MV_V2_WOL_CTRL_CLEAR_STS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Reset the clear WOL status bit as it does not self-clear */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_CLEAR_STS);
+}
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -1039,6 +1124,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .get_wol = mv3110_get_wol,
+ .set_wol = mv3110_set_wol,
},
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -1076,6 +1163,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .get_wol = mv3110_get_wol,
+ .set_wol = mv3110_set_wol,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
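
mv3110_set_wol() above programs the magic-packet MAC address as three 16-bit words, WORD0 holding bytes 0-1 through WORD2 holding bytes 4-5, with the lower-numbered byte in the low half of each word. A standalone sketch of that packing:

#include <stdio.h>
#include <stdint.h>

/* Pack a 6-byte MAC address into the three magic-packet registers the way
 * mv3110_set_wol() does: two consecutive bytes per word, lower-numbered
 * byte in the low half.
 */
static void pack_magic_words(const uint8_t mac[6], uint16_t word[3])
{
	word[0] = (mac[1] << 8) | mac[0];	/* MV_V2_MAGIC_PKT_WORD0 */
	word[1] = (mac[3] << 8) | mac[2];	/* MV_V2_MAGIC_PKT_WORD1 */
	word[2] = (mac[5] << 8) | mac[4];	/* MV_V2_MAGIC_PKT_WORD2 */
}

int main(void)
{
	const uint8_t mac[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
	uint16_t word[3];

	pack_magic_words(mac, word);
	printf("WORD0=0x%04x WORD1=0x%04x WORD2=0x%04x\n",
	       word[0], word[1], word[2]);	/* 0x1100 0x3322 0x5544 */
	return 0;
}
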
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
new file mode 100644
index 000000000000..2d5d5081c3b6
--- /dev/null
+++ b/drivers/net/phy/mxl-gpy.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2021 Maxlinear Corporation
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * Drivers for Maxlinear Ethernet GPY
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+/* PHY ID */
+#define PHY_ID_GPYx15B_MASK 0xFFFFFFFC
+#define PHY_ID_GPY21xB_MASK 0xFFFFFFF9
+#define PHY_ID_GPY2xx 0x67C9DC00
+#define PHY_ID_GPY115B 0x67C9DF00
+#define PHY_ID_GPY115C 0x67C9DF10
+#define PHY_ID_GPY211B 0x67C9DE08
+#define PHY_ID_GPY211C 0x67C9DE10
+#define PHY_ID_GPY212B 0x67C9DE09
+#define PHY_ID_GPY212C 0x67C9DE20
+#define PHY_ID_GPY215B 0x67C9DF04
+#define PHY_ID_GPY215C 0x67C9DF20
+#define PHY_ID_GPY241B 0x67C9DE40
+#define PHY_ID_GPY241BM 0x67C9DE80
+#define PHY_ID_GPY245B 0x67C9DEC0
+
+#define PHY_MIISTAT 0x18 /* MII state */
+#define PHY_IMASK 0x19 /* interrupt mask */
+#define PHY_ISTAT 0x1A /* interrupt status */
+#define PHY_FWV 0x1E /* firmware version */
+
+#define PHY_MIISTAT_SPD_MASK GENMASK(2, 0)
+#define PHY_MIISTAT_DPX BIT(3)
+#define PHY_MIISTAT_LS BIT(10)
+
+#define PHY_MIISTAT_SPD_10 0
+#define PHY_MIISTAT_SPD_100 1
+#define PHY_MIISTAT_SPD_1000 2
+#define PHY_MIISTAT_SPD_2500 4
+
+#define PHY_IMASK_WOL BIT(15) /* Wake-on-LAN */
+#define PHY_IMASK_ANC BIT(10) /* Auto-Neg complete */
+#define PHY_IMASK_ADSC BIT(5) /* Link auto-downspeed detect */
+#define PHY_IMASK_DXMC BIT(2) /* Duplex mode change */
+#define PHY_IMASK_LSPC BIT(1) /* Link speed change */
+#define PHY_IMASK_LSTC BIT(0) /* Link state change */
+#define PHY_IMASK_MASK (PHY_IMASK_LSTC | \
+ PHY_IMASK_LSPC | \
+ PHY_IMASK_DXMC | \
+ PHY_IMASK_ADSC | \
+ PHY_IMASK_ANC)
+
+#define PHY_FWV_REL_MASK BIT(15)
+#define PHY_FWV_TYPE_MASK GENMASK(11, 8)
+#define PHY_FWV_MINOR_MASK GENMASK(7, 0)
+
+/* SGMII */
+#define VSPEC1_SGMII_CTRL 0x08
+#define VSPEC1_SGMII_CTRL_ANEN BIT(12) /* Aneg enable */
+#define VSPEC1_SGMII_CTRL_ANRS BIT(9) /* Restart Aneg */
+#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \
+ VSPEC1_SGMII_CTRL_ANRS)
+
+/* WoL */
+#define VPSPEC2_WOL_CTL 0x0E06
+#define VPSPEC2_WOL_AD01 0x0E08
+#define VPSPEC2_WOL_AD23 0x0E09
+#define VPSPEC2_WOL_AD45 0x0E0A
+#define WOL_EN BIT(0)
+
+static const struct {
+ int type;
+ int minor;
+} ver_need_sgmii_reaneg[] = {
+ {7, 0x6D},
+ {8, 0x6D},
+ {9, 0x73},
+};
+
+static int gpy_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Mask all interrupts */
+ ret = phy_write(phydev, PHY_IMASK, 0);
+ if (ret)
+ return ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
+static int gpy_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->is_c45) {
+ ret = phy_get_c45_ids(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Show GPY PHY FW version in dmesg */
+ ret = phy_read(phydev, PHY_FWV);
+ if (ret < 0)
+ return ret;
+
+ phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret,
+ (ret & PHY_FWV_REL_MASK) ? "release" : "test");
+
+ return 0;
+}
+
+static bool gpy_sgmii_need_reaneg(struct phy_device *phydev)
+{
+ int fw_ver, fw_type, fw_minor;
+ size_t i;
+
+ fw_ver = phy_read(phydev, PHY_FWV);
+ if (fw_ver < 0)
+ return true;
+
+ fw_type = FIELD_GET(PHY_FWV_TYPE_MASK, fw_ver);
+ fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_ver);
+
+ for (i = 0; i < ARRAY_SIZE(ver_need_sgmii_reaneg); i++) {
+ if (fw_type != ver_need_sgmii_reaneg[i].type)
+ continue;
+ if (fw_minor < ver_need_sgmii_reaneg[i].minor)
+ return true;
+ break;
+ }
+
+ return false;
+}
+
+static bool gpy_2500basex_chk(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, PHY_MIISTAT);
+ if (ret < 0) {
+ phydev_err(phydev, "Error: MDIO register access failed: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!(ret & PHY_MIISTAT_LS) ||
+ FIELD_GET(PHY_MIISTAT_SPD_MASK, ret) != PHY_MIISTAT_SPD_2500)
+ return false;
+
+ phydev->speed = SPEED_2500;
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANEN, 0);
+ return true;
+}
+
+static bool gpy_sgmii_aneg_en(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL);
+ if (ret < 0) {
+ phydev_err(phydev, "Error: MMD register access failed: %d\n",
+ ret);
+ return true;
+ }
+
+ return (ret & VSPEC1_SGMII_CTRL_ANEN) ? true : false;
+}
+
+static int gpy_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* Configure half duplex with genphy_setup_forced,
+ * because genphy_c45_pma_setup_forced does not support half duplex.
+ */
+ return phydev->duplex != DUPLEX_FULL
+ ? genphy_setup_forced(phydev)
+ : genphy_c45_pma_setup_forced(phydev);
+ }
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_changed(phydev, MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ ret = genphy_c45_check_and_restart_aneg(phydev, changed);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
+ return 0;
+
+ /* No need to trigger re-ANEG if link speed is 2.5G or SGMII ANEG is
+ * disabled.
+ */
+ if (!gpy_sgmii_need_reaneg(phydev) || gpy_2500basex_chk(phydev) ||
+ !gpy_sgmii_aneg_en(phydev))
+ return 0;
+
+ /* There is a design constraint in the GPY2xx device: SGMII AN is
+ * only triggered on a change of speed. If the PHY link partner's
+ * speed is unchanged even after the PHY TPI goes down and comes up
+ * again, SGMII AN is not triggered and hence no new in-band message
+ * is sent from the GPY to the MAC-side SGMII.
+ * This could cause an issue during power up, when the PHY comes up
+ * before the MAC. In that case, once the MAC-side SGMII is up, it
+ * would not receive a new in-band message from the GPY with the
+ * correct link status, speed and duplex info.
+ *
+ * 1) If the PHY is already up and the TPI link status is still down
+ * (such as after a hard reboot), the TPI link status is polled for
+ * 4 seconds before retriggering SGMII AN.
+ * 2) If the PHY is already up and the TPI link status is also up
+ * (such as after a soft reboot), polling of the TPI link status is
+ * not needed and SGMII AN is retriggered immediately.
+ * 3) In other conditions, such as the PHY being down or a speed
+ * change, retriggering SGMII AN is skipped. Note: in case of a speed
+ * change, the GPY FW will initiate SGMII AN itself.
+ */
+
+ if (phydev->state != PHY_UP)
+ return 0;
+
+ ret = phy_read_poll_timeout(phydev, MII_BMSR, ret, ret & BMSR_LSTATUS,
+ 20000, 4000000, false);
+ if (ret == -ETIMEDOUT)
+ return 0;
+ else if (ret < 0)
+ return ret;
+
+ /* Trigger SGMII AN. */
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANRS, VSPEC1_SGMII_CTRL_ANRS);
+}
+
+static void gpy_update_interface(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Interface mode is fixed for USXGMII and integrated PHY */
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ /* Automatically switch SERDES interface between SGMII and 2500-BaseX
+ * according to speed. Disable ANEG in 2500-BaseX mode.
+ */
+ switch (phydev->speed) {
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANEN, 0);
+ if (ret < 0)
+ phydev_err(phydev,
+ "Error: Disable of SGMII ANEG failed: %d\n",
+ ret);
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ if (gpy_sgmii_aneg_en(phydev))
+ break;
+ /* Enable and restart SGMII ANEG for 10/100/1000Mbps link speed
+ * if ANEG is disabled (in 2500-BaseX mode).
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_ANEN_ANRS,
+ VSPEC1_SGMII_ANEN_ANRS);
+ if (ret < 0)
+ phydev_err(phydev,
+ "Error: Enable of SGMII ANEG failed: %d\n",
+ ret);
+ break;
+ }
+}
+
+static int gpy_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Read the link partner's 1G advertisement */
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ ret = phy_read(phydev, PHY_MIISTAT);
+ if (ret < 0)
+ return ret;
+
+ phydev->link = (ret & PHY_MIISTAT_LS) ? 1 : 0;
+ phydev->duplex = (ret & PHY_MIISTAT_DPX) ? DUPLEX_FULL : DUPLEX_HALF;
+ switch (FIELD_GET(PHY_MIISTAT_SPD_MASK, ret)) {
+ case PHY_MIISTAT_SPD_10:
+ phydev->speed = SPEED_10;
+ break;
+ case PHY_MIISTAT_SPD_100:
+ phydev->speed = SPEED_100;
+ break;
+ case PHY_MIISTAT_SPD_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case PHY_MIISTAT_SPD_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ if (phydev->link)
+ gpy_update_interface(phydev);
+
+ return 0;
+}
+
+static int gpy_config_intr(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ mask = PHY_IMASK_MASK;
+
+ return phy_write(phydev, PHY_IMASK, mask);
+}
+
+static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, PHY_ISTAT);
+ if (reg < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(reg & PHY_IMASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int gpy_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *attach_dev = phydev->attached_dev;
+ int ret;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* MAC address - Byte0:Byte1:Byte2:Byte3:Byte4:Byte5
+ * VPSPEC2_WOL_AD45 = Byte0:Byte1
+ * VPSPEC2_WOL_AD23 = Byte2:Byte3
+ * VPSPEC2_WOL_AD01 = Byte4:Byte5
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD45,
+ ((attach_dev->dev_addr[0] << 8) |
+ attach_dev->dev_addr[1]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD23,
+ ((attach_dev->dev_addr[2] << 8) |
+ attach_dev->dev_addr[3]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD01,
+ ((attach_dev->dev_addr[4] << 8) |
+ attach_dev->dev_addr[5]));
+ if (ret < 0)
+ return ret;
+
+ /* Enable the WOL interrupt */
+ ret = phy_write(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ /* Enable magic packet matching */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_CTL,
+ WOL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the interrupt status register.
+ * Only WoL is enabled so clear all.
+ */
+ ret = phy_read(phydev, PHY_ISTAT);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable magic packet matching */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_CTL,
+ WOL_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wol->wolopts & WAKE_PHY) {
+ /* Enable the link state change interrupt */
+ ret = phy_set_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the interrupt status register */
+ ret = phy_read(phydev, PHY_ISTAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
+ phy_trigger_machine(phydev);
+
+ return 0;
+ }
+
+ /* Disable the link state change interrupt */
+ return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+}
+
+static void gpy_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
+ if (ret & WOL_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ ret = phy_read(phydev, PHY_IMASK);
+ if (ret & PHY_IMASK_LSTC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int gpy_loopback(struct phy_device *phydev, bool enable)
+{
+ int ret;
+
+ ret = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ enable ? BMCR_LOOPBACK : 0);
+ if (!ret) {
+ /* It takes some time for PHY device to switch
+ * into/out-of loopback mode.
+ */
+ msleep(100);
+ }
+
+ return ret;
+}
+
+static struct phy_driver gpy_drivers[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
+ .name = "Maxlinear Ethernet GPY2xx",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY115B,
+ .phy_id_mask = PHY_ID_GPYx15B_MASK,
+ .name = "Maxlinear Ethernet GPY115B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
+ .name = "Maxlinear Ethernet GPY115C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY211B,
+ .phy_id_mask = PHY_ID_GPY21xB_MASK,
+ .name = "Maxlinear Ethernet GPY211B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
+ .name = "Maxlinear Ethernet GPY211C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY212B,
+ .phy_id_mask = PHY_ID_GPY21xB_MASK,
+ .name = "Maxlinear Ethernet GPY212B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
+ .name = "Maxlinear Ethernet GPY212C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY215B,
+ .phy_id_mask = PHY_ID_GPYx15B_MASK,
+ .name = "Maxlinear Ethernet GPY215B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
+ .name = "Maxlinear Ethernet GPY215C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY241B),
+ .name = "Maxlinear Ethernet GPY241B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM),
+ .name = "Maxlinear Ethernet GPY241BM",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY245B),
+ .name = "Maxlinear Ethernet GPY245B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+};
+module_phy_driver(gpy_drivers);
+
+static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
+ {PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
+ {PHY_ID_GPY211B, PHY_ID_GPY21xB_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY211C)},
+ {PHY_ID_GPY212B, PHY_ID_GPY21xB_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY212C)},
+ {PHY_ID_GPY215B, PHY_ID_GPYx15B_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY215C)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY241B)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY245B)},
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, gpy_tbl);
+
+MODULE_DESCRIPTION("Maxlinear Ethernet GPY Driver");
+MODULE_AUTHOR("Xu Liang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8eeb26d8aeb7..f124a8a58bd4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -426,7 +426,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
EXPORT_SYMBOL(phy_mii_ioctl);
/**
- * phy_do_ioctl - generic ndo_do_ioctl implementation
+ * phy_do_ioctl - generic ndo_eth_ioctl implementation
* @dev: the net_device struct
* @ifr: &struct ifreq for socket ioctl's
* @cmd: ioctl cmd to execute
@@ -441,7 +441,7 @@ int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
EXPORT_SYMBOL(phy_do_ioctl);
/**
- * phy_do_ioctl_running - generic ndo_do_ioctl implementation but test first
+ * phy_do_ioctl_running - generic ndo_eth_ioctl implementation but test first
*
* @dev: the net_device struct
* @ifr: &struct ifreq for socket ioctl's
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5d5f9a9ee768..107aa6d7bc6b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -969,6 +969,20 @@ void phy_device_remove(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_remove);
/**
+ * phy_get_c45_ids - Read 802.3-c45 IDs for phy device.
+ * @phydev: phy_device structure to read 802.3-c45 IDs
+ *
+ * Returns zero on success, %-EIO on bus access error, or %-ENODEV if
+ * the "devices in package" is invalid.
+ */
+int phy_get_c45_ids(struct phy_device *phydev)
+{
+ return get_phy_c45_ids(phydev->mdio.bus, phydev->mdio.addr,
+ &phydev->c45_ids);
+}
+EXPORT_SYMBOL(phy_get_c45_ids);
+
+/**
* phy_find_first - finds the first PHY device on the bus
* @bus: the target MII bus
*/
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index eb29ef53d971..2cdf9f989dec 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -942,10 +942,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
- phy_duplex_to_str(phydev->duplex));
+ phy_duplex_to_str(phydev->duplex),
+ phylink_pause_to_str(pl->phy_state.pause));
}
static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
@@ -1457,15 +1458,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
return phy_ethtool_ksettings_set(pl->phydev, kset);
}
- linkmode_copy(support, pl->supported);
config = pl->link_config;
- config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE;
- /* Mask out unsupported advertisements, and force the autoneg bit */
+ /* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
- support);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
- config.an_enabled);
+ pl->supported);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
switch (kset->base.autoneg) {
@@ -1474,7 +1471,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- support, false);
+ pl->supported, false);
if (!s)
return -EINVAL;
@@ -1515,6 +1512,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* We have ruled out the case with a PHY attached, and the
* fixed-link cases. All that is left are in-band links.
*/
+ config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
+ config.an_enabled);
+
+ /* Validate without changing the current supported mask. */
+ linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config))
return -EINVAL;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index e26cf91bdec2..82d609401711 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -84,6 +84,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
extra grounds are 18,19,20,21,22,23,24
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -150,7 +151,8 @@ static int plip_hard_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
-static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
@@ -265,7 +267,7 @@ static const struct net_device_ops plip_netdev_ops = {
.ndo_open = plip_open,
.ndo_stop = plip_close,
.ndo_start_xmit = plip_tx_packet,
- .ndo_do_ioctl = plip_ioctl,
+ .ndo_siocdevprivate = plip_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1207,7 +1209,8 @@ plip_wakeup(void *handle)
}
static int
-plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct net_local *nl = netdev_priv(dev);
struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
@@ -1215,6 +1218,9 @@ plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd != SIOCDEVPLIP)
return -EOPNOTSUPP;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch(pc->pcmd) {
case PLIP_GET_TIMEOUT:
pc->trigger = nl->trigger;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 930e49ef15f6..216a9f4e9750 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1452,11 +1452,11 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
static int
-ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *addr, int cmd)
{
struct ppp *ppp = netdev_priv(dev);
int err = -EFAULT;
- void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
struct ppp_stats stats;
struct ppp_comp_stats cstats;
char *vers;
@@ -1585,7 +1585,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
- .ndo_do_ioctl = ppp_net_ioctl,
+ .ndo_siocdevprivate = ppp_net_siocdevprivate,
.ndo_get_stats64 = ppp_get_stats64,
.ndo_fill_forward_path = ppp_fill_forward_path,
};
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index e88af978f63c..f01c9db01b16 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -78,7 +78,8 @@ struct sb1000_private {
/* prototypes for Linux interface */
extern int sb1000_probe(struct net_device *dev);
static int sb1000_open(struct net_device *dev);
-static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
+static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
@@ -135,7 +136,7 @@ MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
static const struct net_device_ops sb1000_netdev_ops = {
.ndo_open = sb1000_open,
.ndo_start_xmit = sb1000_start_xmit,
- .ndo_do_ioctl = sb1000_dev_ioctl,
+ .ndo_siocdevprivate = sb1000_siocdevprivate,
.ndo_stop = sb1000_close,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -987,7 +988,8 @@ sb1000_open(struct net_device *dev)
return 0; /* Always succeed */
}
-static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
char* name;
unsigned char version[2];
@@ -1011,7 +1013,7 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
stats[2] = dev->stats.rx_packets;
stats[3] = dev->stats.rx_errors;
stats[4] = dev->stats.rx_dropped;
- if(copy_to_user(ifr->ifr_data, stats, sizeof(stats)))
+ if (copy_to_user(data, stats, sizeof(stats)))
return -EFAULT;
status = 0;
break;
@@ -1019,21 +1021,21 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGCMFIRMWARE: /* get firmware version */
if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
return status;
- if(copy_to_user(ifr->ifr_data, version, sizeof(version)))
+ if (copy_to_user(data, version, sizeof(version)))
return -EFAULT;
break;
case SIOCGCMFREQUENCY: /* get frequency */
if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
return status;
- if(put_user(frequency, (int __user *) ifr->ifr_data))
+ if (put_user(frequency, (int __user *)data))
return -EFAULT;
break;
case SIOCSCMFREQUENCY: /* set frequency */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(get_user(frequency, (int __user *) ifr->ifr_data))
+ if (get_user(frequency, (int __user *)data))
return -EFAULT;
if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
return status;
@@ -1042,14 +1044,14 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGCMPIDS: /* get PIDs */
if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
return status;
- if(copy_to_user(ifr->ifr_data, PID, sizeof(PID)))
+ if (copy_to_user(data, PID, sizeof(PID)))
return -EFAULT;
break;
case SIOCSCMPIDS: /* set PIDs */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(copy_from_user(PID, ifr->ifr_data, sizeof(PID)))
+ if (copy_from_user(PID, data, sizeof(PID)))
return -EFAULT;
if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
return status;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index dc84cb844319..5435b5689ce6 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -62,6 +62,7 @@
*/
#define SL_CHECK_TRANSMIT
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -108,7 +109,7 @@ static void slip_unesc6(struct slip *sl, unsigned char c);
#ifdef CONFIG_SLIP_SMART
static void sl_keepalive(struct timer_list *t);
static void sl_outfill(struct timer_list *t);
-static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
#endif
/********************************
@@ -647,7 +648,7 @@ static const struct net_device_ops sl_netdev_ops = {
.ndo_change_mtu = sl_change_mtu,
.ndo_tx_timeout = sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
- .ndo_do_ioctl = sl_ioctl,
+ .ndo_siocdevprivate = sl_siocdevprivate,
#endif
};
@@ -1179,11 +1180,12 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
-/* function do_ioctl called from net/core/dev.c
+/* function sl_siocdevprivate called from net/core/dev.c
to allow get/set outfill/keepalive parameter
by ifconfig */
-static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct slip *sl = netdev_priv(dev);
unsigned long *p = (unsigned long *)&rq->ifr_ifru;
@@ -1191,6 +1193,9 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (sl == NULL) /* Allocation failed ?? */
return -ENODEV;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
spin_lock_bh(&sl->lock);
if (!sl->tty) {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c115216420a..cb01897c7a5d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -197,7 +197,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = asix_ioctl,
.ndo_set_rx_mode = ax88172_set_multicast,
};
@@ -589,7 +589,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
@@ -714,7 +714,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN] = {0}, chipcode = 0;
struct asix_common_private *priv;
int ret, i;
- u32 phyid;
usbnet_get_endpoints(dev, intf);
@@ -762,10 +761,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- /* Read PHYID register *AFTER* the PHY was reset properly */
- phyid = asix_get_phyid(dev);
- netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
-
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
@@ -1100,7 +1095,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = asix_ioctl,
.ndo_change_mtu = ax88178_change_mtu,
};
@@ -1215,6 +1210,7 @@ static const struct driver_info ax88772b_info = {
.unbind = ax88772_unbind,
.status = asix_status,
.reset = ax88772_reset,
+ .stop = ax88772_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 530947d7477b..d9777d9a7c5d 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -109,7 +109,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index c1316718304d..f25448a08870 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1035,7 +1035,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ax88179_ioctl,
+ .ndo_eth_ioctl = ax88179_ioctl,
.ndo_set_rx_mode = ax88179_set_multicast,
.ndo_set_features = ax88179_set_features,
};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 8d1f69dad603..e1da9102a540 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -253,7 +253,8 @@ static int usbpn_close(struct net_device *dev)
return usb_set_interface(pnd->usb, num, !pnd->active_setting);
}
-static int usbpn_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int usbpn_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct if_phonet_req *req = (struct if_phonet_req *)ifr;
@@ -269,7 +270,7 @@ static const struct net_device_ops usbpn_ops = {
.ndo_open = usbpn_open,
.ndo_stop = usbpn_close,
.ndo_start_xmit = usbpn_xmit,
- .ndo_do_ioctl = usbpn_ioctl,
+ .ndo_siocdevprivate = usbpn_siocdevprivate,
};
static void usbpn_setup(struct net_device *dev)
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 89cc61d7a675..907f98b1eefe 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -345,7 +345,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = dm9601_ioctl,
+ .ndo_eth_ioctl = dm9601_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
.ndo_set_mac_address = dm9601_set_mac_address,
};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index dec96e8ab567..827d574f764a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2353,7 +2353,7 @@ static int remove_net_device(struct hso_device *hso_dev)
}
/* Frees our network device */
-static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
+static void hso_free_net_device(struct hso_device *hso_dev)
{
int i;
struct hso_net *hso_net = dev2net(hso_dev);
@@ -2376,7 +2376,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
- if (hso_net->net && !bailout)
+ if (hso_net->net)
free_netdev(hso_net->net);
kfree(hso_dev);
@@ -3133,7 +3133,7 @@ static void hso_free_interface(struct usb_interface *interface)
rfkill_unregister(rfk);
rfkill_destroy(rfk);
}
- hso_free_net_device(network_table[i], false);
+ hso_free_net_device(network_table[i]);
}
}
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 25489389ea49..13f86368b78a 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3601,7 +3601,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_change_mtu = lan78xx_change_mtu,
.ndo_set_mac_address = lan78xx_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = lan78xx_set_multicast,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 2469bdcb1a04..66866bef25df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -464,7 +464,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mcs7830_ioctl,
+ .ndo_eth_ioctl = mcs7830_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
.ndo_set_mac_address = mcs7830_set_mac_address,
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 9a907182569c..0475ef0efdca 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -987,7 +987,8 @@ static const struct ethtool_ops ops = {
.set_link_ksettings = pegasus_set_link_ksettings,
};
-static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
+ void __user *udata, int cmd)
{
__u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
@@ -1245,7 +1246,7 @@ static int pegasus_resume(struct usb_interface *intf)
static const struct net_device_ops pegasus_netdev_ops = {
.ndo_open = pegasus_open,
.ndo_stop = pegasus_close,
- .ndo_do_ioctl = pegasus_ioctl,
+ .ndo_siocdevprivate = pegasus_siocdevprivate,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_tx_timeout = pegasus_tx_timeout,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e09b107b5c99..d7fbc81b518a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9173,7 +9173,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_open = rtl8152_open,
.ndo_stop = rtl8152_close,
- .ndo_do_ioctl = rtl8152_ioctl,
+ .ndo_eth_ioctl = rtl8152_ioctl,
.ndo_start_xmit = rtl8152_start_xmit,
.ndo_tx_timeout = rtl8152_tx_timeout,
.ndo_set_features = rtl8152_set_features,
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 7656f2a3afd9..4a1b0e0fc3a3 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -822,7 +822,8 @@ static const struct ethtool_ops ops = {
.get_link_ksettings = rtl8150_get_link_ksettings,
};
-static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq,
+ void __user *udata, int cmd)
{
rtl8150_t *dev = netdev_priv(netdev);
u16 *data = (u16 *) & rq->ifr_ifru;
@@ -850,7 +851,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static const struct net_device_ops rtl8150_netdev_ops = {
.ndo_open = rtl8150_open,
.ndo_stop = rtl8150_close,
- .ndo_do_ioctl = rtl8150_ioctl,
+ .ndo_siocdevprivate = rtl8150_siocdevprivate,
.ndo_start_xmit = rtl8150_start_xmit,
.ndo_tx_timeout = rtl8150_tx_timeout,
.ndo_set_rx_mode = rtl8150_set_multicast,
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 13141dbfa3a8..76f7af161313 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1439,7 +1439,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_change_mtu = smsc75xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = smsc75xx_ioctl,
+ .ndo_eth_ioctl = smsc75xx_ioctl,
.ndo_set_rx_mode = smsc75xx_set_multicast,
.ndo_set_features = smsc75xx_set_features,
};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 4c8ee1cff4d4..7d953974eb9b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1044,7 +1044,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = smsc95xx_ioctl,
+ .ndo_eth_ioctl = smsc95xx_ioctl,
.ndo_set_rx_mode = smsc95xx_set_multicast,
.ndo_set_features = smsc95xx_set_features,
};
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index ce29261263cd..6516a37893e2 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -310,7 +310,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = sr9700_ioctl,
+ .ndo_eth_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
.ndo_set_mac_address = sr9700_set_mac_address,
};
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index a822d81310d5..576401c8b1be 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -684,7 +684,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = sr_ioctl,
+ .ndo_eth_ioctl = sr_ioctl,
.ndo_set_rx_mode = sr_set_multicast,
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index bdb7ce3cb054..50eb43e5bf45 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -224,12 +224,13 @@ static void veth_get_channels(struct net_device *dev,
{
channels->tx_count = dev->real_num_tx_queues;
channels->rx_count = dev->real_num_rx_queues;
- channels->max_tx = dev->real_num_tx_queues;
- channels->max_rx = dev->real_num_rx_queues;
- channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
- channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
+ channels->max_tx = dev->num_tx_queues;
+ channels->max_rx = dev->num_rx_queues;
}
+static int veth_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+
static const struct ethtool_ops veth_ethtool_ops = {
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -239,6 +240,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_link_ksettings = veth_get_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
.get_channels = veth_get_channels,
+ .set_channels = veth_set_channels,
};
/* general routines */
@@ -711,7 +713,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
int mac_len, delta, off;
struct xdp_buff xdp;
- skb_orphan_partial(skb);
+ skb_prepare_for_gro(skb);
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -928,12 +930,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
return done;
}
-static int __veth_napi_enable(struct net_device *dev)
+static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
@@ -941,7 +943,7 @@ static int __veth_napi_enable(struct net_device *dev)
goto err_xdp_ring;
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
napi_enable(&rq->xdp_napi);
@@ -949,19 +951,25 @@ static int __veth_napi_enable(struct net_device *dev)
}
return 0;
+
err_xdp_ring:
- for (i--; i >= 0; i--)
+ for (i--; i >= start; i--)
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
return err;
}
-static void veth_napi_del(struct net_device *dev)
+static int __veth_napi_enable(struct net_device *dev)
+{
+ return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
+static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rcu_assign_pointer(priv->rq[i].napi, NULL);
@@ -970,7 +978,7 @@ static void veth_napi_del(struct net_device *dev)
}
synchronize_net();
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->rx_notify_masked = false;
@@ -978,41 +986,90 @@ static void veth_napi_del(struct net_device *dev)
}
}
+static void veth_napi_del(struct net_device *dev)
+{
+ veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
+}
+
static bool veth_gro_requested(const struct net_device *dev)
{
return !!(dev->wanted_features & NETIF_F_GRO);
}
-static int veth_enable_xdp(struct net_device *dev)
+static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
+ bool napi_already_on)
{
- bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
- if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
- err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
- if (err < 0)
- goto err_rxq_reg;
+ if (!napi_already_on)
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
+ if (err < 0)
+ goto err_rxq_reg;
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err < 0)
- goto err_reg_mem;
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err < 0)
+ goto err_reg_mem;
- /* Save original mem info as it can be overwritten */
- rq->xdp_mem = rq->xdp_rxq.mem;
- }
+ /* Save original mem info as it can be overwritten */
+ rq->xdp_mem = rq->xdp_rxq.mem;
+ }
+ return 0;
+
+err_reg_mem:
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+err_rxq_reg:
+ for (i--; i >= start; i--) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (!napi_already_on)
+ netif_napi_del(&rq->xdp_napi);
+ }
+
+ return err;
+}
+
+static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
+ bool delete_napi)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ rq->xdp_rxq.mem = rq->xdp_mem;
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
+ if (delete_napi)
+ netif_napi_del(&rq->xdp_napi);
+ }
+}
+
+static int veth_enable_xdp(struct net_device *dev)
+{
+ bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
+ err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
+ if (err)
+ return err;
if (!napi_already_on) {
err = __veth_napi_enable(dev);
- if (err)
- goto err_rxq_reg;
+ if (err) {
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
+ return err;
+ }
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding XDP
@@ -1030,18 +1087,6 @@ static int veth_enable_xdp(struct net_device *dev)
}
return 0;
-err_reg_mem:
- xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
-err_rxq_reg:
- for (i--; i >= 0; i--) {
- struct veth_rq *rq = &priv->rq[i];
-
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- if (!napi_already_on)
- netif_napi_del(&rq->xdp_napi);
- }
-
- return err;
}
static void veth_disable_xdp(struct net_device *dev)
@@ -1064,28 +1109,23 @@ static void veth_disable_xdp(struct net_device *dev)
}
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
-
- rq->xdp_rxq.mem = rq->xdp_mem;
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- }
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
-static int veth_napi_enable(struct net_device *dev)
+static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
}
- err = __veth_napi_enable(dev);
+ err = __veth_napi_enable_range(dev, start, end);
if (err) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_del(&rq->xdp_napi);
@@ -1095,6 +1135,128 @@ static int veth_napi_enable(struct net_device *dev)
return err;
}
+static int veth_napi_enable(struct net_device *dev)
+{
+ return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
+static void veth_disable_range_safe(struct net_device *dev, int start, int end)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ if (start >= end)
+ return;
+
+ if (priv->_xdp_prog) {
+ veth_napi_del_range(dev, start, end);
+ veth_disable_xdp_range(dev, start, end, false);
+ } else if (veth_gro_requested(dev)) {
+ veth_napi_del_range(dev, start, end);
+ }
+}
+
+static int veth_enable_range_safe(struct net_device *dev, int start, int end)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (start >= end)
+ return 0;
+
+ if (priv->_xdp_prog) {
+ /* these channels are freshly initialized, napi is not on there even
+ * when GRO is requested
+ */
+ err = veth_enable_xdp_range(dev, start, end, false);
+ if (err)
+ return err;
+
+ err = __veth_napi_enable_range(dev, start, end);
+ if (err) {
+ /* on error always delete the newly added napis */
+ veth_disable_xdp_range(dev, start, end, true);
+ return err;
+ }
+ } else if (veth_gro_requested(dev)) {
+ return veth_napi_enable_range(dev, start, end);
+ }
+ return 0;
+}
+
+static int veth_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ unsigned int old_rx_count, new_rx_count;
+ struct veth_priv *peer_priv;
+ struct net_device *peer;
+ int err;
+
+ /* sanity check. Upper bounds are already enforced by the caller */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ /* avoid breaking XDP, if that is enabled */
+ peer = rtnl_dereference(priv->peer);
+ peer_priv = peer ? netdev_priv(peer) : NULL;
+ if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
+ return -EINVAL;
+
+ if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
+ return -EINVAL;
+
+ old_rx_count = dev->real_num_rx_queues;
+ new_rx_count = ch->rx_count;
+ if (netif_running(dev)) {
+ /* turn device off */
+ netif_carrier_off(dev);
+ if (peer)
+ netif_carrier_off(peer);
+
+ /* try to allocate new resources, as needed */
+ err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
+ if (err)
+ goto out;
+ }
+
+ err = netif_set_real_num_rx_queues(dev, ch->rx_count);
+ if (err)
+ goto revert;
+
+ err = netif_set_real_num_tx_queues(dev, ch->tx_count);
+ if (err) {
+ int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
+
+ /* this error condition could happen only if rx and tx change
+ * in opposite directions (e.g. tx nr raises, rx nr decreases)
+ * and we can't do anything to fully restore the original
+ * status
+ */
+ if (err2)
+ pr_warn("Can't restore rx queues config %d -> %d %d",
+ new_rx_count, old_rx_count, err2);
+ else
+ goto revert;
+ }
+
+out:
+ if (netif_running(dev)) {
+ /* note that we need to swap the arguments WRT the enable part
+ * to identify the range we have to disable
+ */
+ veth_disable_range_safe(dev, new_rx_count, old_rx_count);
+ netif_carrier_on(dev);
+ if (peer)
+ netif_carrier_on(peer);
+ }
+ return err;
+
+revert:
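+ /* swap the counts so that the out: path tears down the
+ * range enabled above
+ */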
+ new_rx_count = old_rx_count;
+ old_rx_count = ch->rx_count;
+ goto out;
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -1447,6 +1609,23 @@ static void veth_disable_gro(struct net_device *dev)
netdev_update_features(dev);
}
+static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
+{
+ int err;
+
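+ /* default to a single real queue when the queue count was not
+ * specified via netlink
+ */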
+ if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
+ err = netif_set_real_num_tx_queues(dev, 1);
+ if (err)
+ return err;
+ }
+ if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
+ err = netif_set_real_num_rx_queues(dev, 1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1556,13 +1735,21 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
priv = netdev_priv(dev);
rcu_assign_pointer(priv->peer, peer);
+ err = veth_init_queues(dev, tb);
+ if (err)
+ goto err_queues;
priv = netdev_priv(peer);
rcu_assign_pointer(priv->peer, dev);
+ err = veth_init_queues(peer, tb);
+ if (err)
+ goto err_queues;
veth_disable_gro(dev);
return 0;
+err_queues:
+ unregister_netdevice(dev);
err_register_dev:
/* nothing to do */
err_configure_peer:
@@ -1608,6 +1795,16 @@ static struct net *veth_get_link_net(const struct net_device *dev)
return peer ? dev_net(peer) : dev_net(dev);
}
+static unsigned int veth_get_num_queues(void)
+{
+ /* enforce the same queue limit as rtnl_create_link */
+ int queues = num_possible_cpus();
+
+ if (queues > 4096)
+ queues = 4096;
+ return queues;
+}
+
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
.priv_size = sizeof(struct veth_priv),
@@ -1618,6 +1815,8 @@ static struct rtnl_link_ops veth_link_ops = {
.policy = veth_policy,
.maxtype = VETH_INFO_MAX,
.get_link_net = veth_get_link_net,
+ .get_num_tx_queues = veth_get_num_queues,
+ .get_num_rx_queues = veth_get_num_queues,
};
/*
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index c5a167a1c85c..7a38925f4165 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2020, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 8c014c98471c..f9f3a23d1698 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index a8d5ebd47c71..74d4e8bc4abc 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -98,6 +98,9 @@ enum {
VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
VMXNET3_CMD_GET_COALESCE,
VMXNET3_CMD_GET_RSS_FIELDS,
+ VMXNET3_CMD_GET_RESERVED2,
+ VMXNET3_CMD_GET_RESERVED3,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF,
};
/*
@@ -341,13 +344,15 @@ struct Vmxnet3_RxCompDescExt {
#define VMXNET3_TXD_EOP_SIZE 1
/* value of RxCompDesc.rssType */
-enum {
- VMXNET3_RCD_RSS_TYPE_NONE = 0,
- VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
- VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
- VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
- VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
-};
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV4 5
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV6 6
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV4 7
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV6 8
/* a union for accessing all cmd/completion descriptors */
@@ -533,6 +538,13 @@ enum vmxnet3_intr_type {
/* addition 1 for events */
#define VMXNET3_MAX_INTRS 25
+/* Version 6 and later will use below macros */
+#define VMXNET3_EXT_MAX_TX_QUEUES 32
+#define VMXNET3_EXT_MAX_RX_QUEUES 32
+/* addition 1 for events */
+#define VMXNET3_EXT_MAX_INTRS 65
+#define VMXNET3_FIRST_SET_INTRS 64
+
/* value of intrCtrl */
#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
@@ -547,6 +559,19 @@ struct Vmxnet3_IntrConf {
__le32 reserved[2];
};
+struct Vmxnet3_IntrConfExt {
+ u8 autoMask;
+ u8 numIntrs; /* # of interrupts */
+ u8 eventIntrIdx;
+ u8 reserved;
+ __le32 intrCtrl;
+ __le32 reserved1;
+ u8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for
+ * each intr
+ */
+ u8 reserved2[3];
+};
+
/* one bit per VLAN ID, the size is in the units of u32 */
#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
@@ -719,11 +744,16 @@ struct Vmxnet3_DSDevRead {
struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
};
+struct Vmxnet3_DSDevReadExt {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_IntrConfExt intrConfExt;
+};
+
/* All structures in DriverShared are padded to multiples of 8 bytes */
struct Vmxnet3_DriverShared {
__le32 magic;
/* make devRead start at 64bit boundaries */
- __le32 pad;
+ __le32 size; /* size of DriverShared */
struct Vmxnet3_DSDevRead devRead;
__le32 ecr;
__le32 reserved;
@@ -734,6 +764,7 @@ struct Vmxnet3_DriverShared {
* command
*/
} cu;
+ struct Vmxnet3_DSDevReadExt devReadExt;
};
@@ -764,6 +795,7 @@ struct Vmxnet3_DriverShared {
((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_V6_MAX_MTU 9190
#define VMXNET3_MIN_MTU 60
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6e87f1fc4874..e3c6b7e3bfdd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -1478,10 +1478,28 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
- (adapter->netdev->features & NETIF_F_RXHASH))
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
skb_set_hash(ctx->skb,
le32_to_cpu(rcd->rssHash),
- PKT_HASH_TYPE_L3);
+ hash_type);
+ }
#endif
skb_put(ctx->skb, rcd->len);
@@ -2460,6 +2478,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
int i;
@@ -2572,14 +2591,26 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
#endif /* VMXNET3_RSS */
/* intr settings */
- devRead->intrConf.autoMask = adapter->intr.mask_mode ==
- VMXNET3_IMM_AUTO;
- devRead->intrConf.numIntrs = adapter->intr.num_intrs;
- for (i = 0; i < adapter->intr.num_intrs; i++)
- devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
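+ /* with the extended queue configuration (rev. 6), interrupt
+ * settings go into intrConfExt instead of intrConf
+ */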
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ devRead->intrConf.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
+
+ devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
+ devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
- devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
- devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
+ devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ }
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
@@ -2717,6 +2748,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
+ netif_tx_wake_all_queues(adapter->netdev);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
@@ -3372,6 +3404,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
int size;
int num_tx_queues;
int num_rx_queues;
+ int queues;
+ unsigned long flags;
if (!pci_msi_enabled())
enable_mq = 0;
@@ -3383,7 +3417,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
#endif
num_rx_queues = 1;
- num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -3391,13 +3424,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
num_tx_queues = 1;
- num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
- dev_info(&pdev->dev,
- "# of Tx queues : %d, # of Rx queues : %d\n",
- num_tx_queues, num_rx_queues);
-
if (!netdev)
return -ENOMEM;
@@ -3447,51 +3475,22 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_shared;
}
- adapter->num_rx_queues = num_rx_queues;
- adapter->num_tx_queues = num_tx_queues;
- adapter->rx_buf_per_pkt = 1;
-
- size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
- size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
- adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
- &adapter->queue_desc_pa,
- GFP_KERNEL);
-
- if (!adapter->tqd_start) {
- dev_err(&pdev->dev, "Failed to allocate memory\n");
- err = -ENOMEM;
- goto err_alloc_queue_desc;
- }
- adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
- adapter->num_tx_queues);
-
- adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
- sizeof(struct Vmxnet3_PMConf),
- &adapter->pm_conf_pa,
- GFP_KERNEL);
- if (adapter->pm_conf == NULL) {
- err = -ENOMEM;
- goto err_alloc_pm;
- }
-
-#ifdef VMXNET3_RSS
-
- adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
- sizeof(struct UPT1_RSSConf),
- &adapter->rss_conf_pa,
- GFP_KERNEL);
- if (adapter->rss_conf == NULL) {
- err = -ENOMEM;
- goto err_alloc_rss;
- }
-#endif /* VMXNET3_RSS */
-
err = vmxnet3_alloc_pci_resources(adapter);
if (err < 0)
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_4)) {
+ if (ver & (1 << VMXNET3_REV_6)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_6);
+ adapter->version = VMXNET3_REV_6 + 1;
+ } else if (ver & (1 << VMXNET3_REV_5)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_5);
+ adapter->version = VMXNET3_REV_5 + 1;
+ } else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
@@ -3529,6 +3528,77 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
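+ /* the result packs the max rx queue count in bits 15:8 and the
+ * max tx queue count in bits 7:0
+ */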
+ if (queues > 0) {
+ adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
+ adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
+ } else {
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+ adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+ adapter->queuesExtEnabled = true;
+ } else {
+ adapter->queuesExtEnabled = false;
+ }
+ } else {
+ adapter->queuesExtEnabled = false;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ dev_info(&pdev->dev,
+ "# of Tx queues : %d, # of Rx queues : %d\n",
+ adapter->num_tx_queues, adapter->num_rx_queues);
+
+ adapter->rx_buf_per_pkt = 1;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &adapter->queue_desc_pa,
+ GFP_KERNEL);
+
+ if (!adapter->tqd_start) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ err = -ENOMEM;
+ goto err_ver;
+ }
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
+
+ adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_PMConf),
+ &adapter->pm_conf_pa,
+ GFP_KERNEL);
+ if (adapter->pm_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_pm;
+ }
+
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct UPT1_RSSConf),
+ &adapter->rss_conf_pa,
+ GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
if (VMXNET3_VERSION_GE_3(adapter)) {
adapter->coal_conf =
dma_alloc_coherent(&adapter->pdev->dev,
@@ -3538,7 +3608,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
GFP_KERNEL);
if (!adapter->coal_conf) {
err = -ENOMEM;
- goto err_ver;
+ goto err_coal_conf;
}
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
@@ -3581,9 +3651,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 60 - 9000 */
+ /* MTU range: 60 - 9190 */
netdev->min_mtu = VMXNET3_MIN_MTU;
- netdev->max_mtu = VMXNET3_MAX_MTU;
+ if (VMXNET3_VERSION_GE_6(adapter))
+ netdev->max_mtu = VMXNET3_V6_MAX_MTU;
+ else
+ netdev->max_mtu = VMXNET3_MAX_MTU;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
@@ -3621,9 +3694,7 @@ err_register:
adapter->coal_conf, adapter->coal_conf_pa);
}
vmxnet3_free_intr_resources(adapter);
-err_ver:
- vmxnet3_free_pci_resources(adapter);
-err_alloc_pci:
+err_coal_conf:
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
@@ -3634,7 +3705,9 @@ err_alloc_rss:
err_alloc_pm:
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa);
-err_alloc_queue_desc:
+err_ver:
+ vmxnet3_free_pci_resources(adapter);
+err_alloc_pci:
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
@@ -3653,7 +3726,8 @@ vmxnet3_remove_device(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int size = 0;
- int num_rx_queues;
+ int num_rx_queues, rx_queues;
+ unsigned long flags;
#ifdef VMXNET3_RSS
if (enable_mq)
@@ -3662,7 +3736,24 @@ vmxnet3_remove_device(struct pci_dev *pdev)
else
#endif
num_rx_queues = 1;
- num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ if (!VMXNET3_VERSION_GE_6(adapter)) {
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ }
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (rx_queues > 0)
+ rx_queues = (rx_queues >> 8) & 0xff;
+ else
+ rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ num_rx_queues = min(num_rx_queues, rx_queues);
+ } else {
+ num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ }
cancel_work_sync(&adapter->work);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 1b483cf2b1ca..a3e2f2ba68b5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -787,6 +787,10 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
+ if (VMXNET3_VERSION_GE_6(adapter) &&
+ (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
case SCTP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
@@ -871,6 +875,22 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
case ESP_V6_FLOW:
case AH_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (!VMXNET3_VERSION_GE_6(adapter))
+ return -EOPNOTSUPP;
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index e910596b79cf..7027ff483fa5 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -69,18 +69,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.6.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01050000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01060000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
+#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
@@ -301,15 +303,18 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
-#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */
+
+#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8
+#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8 /* Keep this value as a power of 2 */
/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for tx, 1 for rx pair and 1 for event */
struct vmxnet3_intr {
@@ -396,6 +401,7 @@ struct vmxnet3_adapter {
dma_addr_t adapter_pa;
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
+ bool queuesExtEnabled;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -421,6 +427,10 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
(adapter->version >= VMXNET3_REV_4 + 1)
+#define VMXNET3_VERSION_GE_5(adapter) \
+ (adapter->version >= VMXNET3_REV_5 + 1)
+#define VMXNET3_VERSION_GE_6(adapter) \
+ (adapter->version >= VMXNET3_REV_6 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
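As the comment in vmxnet3_int.h notes, each byte of VMXNET3_DRIVER_VERSION_NUM encodes one component of VMXNET3_DRIVER_VERSION_STRING, so the bump from 0x01050000 to 0x01060000 matches the string change from "1.5.0.0-k" to "1.6.0.0-k". A minimal decoding sketch, purely illustrative and not part of the patch:

	/* illustrative only, not part of the patch: decode the version word */
	#include <stdio.h>

	int main(void)
	{
		unsigned int v = 0x01060000;	/* VMXNET3_DRIVER_VERSION_NUM */

		/* one byte per component, most significant byte first */
		printf("%u.%u.%u.%u-k\n",
		       (v >> 24) & 0xff, (v >> 16) & 0xff,
		       (v >> 8) & 0xff, v & 0xff);	/* prints 1.6.0.0-k */
		return 0;
	}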
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 059c2f7133be..8dd14d916c3a 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -208,14 +208,12 @@ static int c101_close(struct net_device *dev)
return 0;
}
-static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int c101_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+#ifdef DEBUG_RINGS
port_t *port = dev_to_port(dev);
-#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
@@ -226,14 +224,22 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
- switch (ifr->ifr_settings.type) {
+ return -EOPNOTSUPP;
+}
+
+static int c101_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
+
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -261,7 +267,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -286,7 +292,8 @@ static const struct net_device_ops c101_ops = {
.ndo_open = c101_open,
.ndo_stop = c101_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = c101_ioctl,
+ .ndo_siocwandev = c101_ioctl,
+ .ndo_siocdevprivate = c101_siocdevprivate,
};
static int __init c101_run(unsigned long irq, unsigned long winbase)
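The c101 hunks are representative of the whole drivers/net/wan series in this diff: the multiplexed .ndo_do_ioctl handler is split so that SIOCWANDEV requests arrive at .ndo_siocwandev already unwrapped as a struct if_settings (the old `cmd != SIOCWANDEV` guard disappears), while the driver-private debug commands move to .ndo_siocdevprivate. A minimal sketch of the resulting shape for a hypothetical driver "foo" (names are illustrative, not taken from the patch):

	static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
				      void __user *data, int cmd)
	{
		/* only SIOCDEVPRIVATE-range commands end up here */
		return -EOPNOTSUPP;
	}

	static int foo_wandev_ioctl(struct net_device *dev, struct if_settings *ifs)
	{
		switch (ifs->type) {
		case IF_GET_IFACE:
			ifs->type = IF_IFACE_SYNC_SERIAL;
			return 0;
		default:
			/* anything the driver does not handle goes to the HDLC core */
			return hdlc_ioctl(dev, ifs);
		}
	}

	static const struct net_device_ops foo_ops = {
		.ndo_start_xmit     = hdlc_start_xmit,
		.ndo_siocwandev     = foo_wandev_ioctl,
		.ndo_siocdevprivate = foo_siocdevprivate,
	};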
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 43caab0b7dee..23d2954d9747 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -267,7 +267,6 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
static int cosa_net_tx_done(struct channel_data *channel, int size);
-static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Character device */
static char *chrdev_setup_rx(struct channel_data *channel, int size);
@@ -415,7 +414,7 @@ static const struct net_device_ops cosa_ops = {
.ndo_open = cosa_net_open,
.ndo_stop = cosa_net_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = cosa_net_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
.ndo_tx_timeout = cosa_net_timeout,
};
@@ -1169,18 +1168,6 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
return -ENOIOCTLCMD;
}
-static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- int rv;
- struct channel_data *chan = dev_to_chan(dev);
-
- rv = cosa_ioctl_common(chan->cosa, chan, cmd,
- (unsigned long)ifr->ifr_data);
- if (rv != -ENOIOCTLCMD)
- return rv;
- return hdlc_ioctl(dev, ifr, cmd);
-}
-
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index b3466e084e84..6a212c085435 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1784,16 +1784,15 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
static int
fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
- struct ifreq *ifr)
+ struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
- if (ifr->ifr_settings.size != sizeof(sync))
+ if (ifs->size != sizeof(sync))
return -ENOMEM;
- if (copy_from_user
- (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof(sync)))
+ if (copy_from_user(&sync, ifs->ifs_ifsu.sync, sizeof(sync)))
return -EFAULT;
if (sync.loopback)
@@ -1801,7 +1800,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
i = port->index;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_IFACE_V35:
FST_WRW(card, portConfig[i].lineInterface, V35);
port->hwif = V35;
@@ -1857,7 +1856,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
static int
fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
- struct ifreq *ifr)
+ struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
@@ -1868,29 +1867,29 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
*/
switch (port->hwif) {
case E1:
- ifr->ifr_settings.type = IF_IFACE_E1;
+ ifs->type = IF_IFACE_E1;
break;
case T1:
- ifr->ifr_settings.type = IF_IFACE_T1;
+ ifs->type = IF_IFACE_T1;
break;
case V35:
- ifr->ifr_settings.type = IF_IFACE_V35;
+ ifs->type = IF_IFACE_V35;
break;
case V24:
- ifr->ifr_settings.type = IF_IFACE_V24;
+ ifs->type = IF_IFACE_V24;
break;
case X21D:
- ifr->ifr_settings.type = IF_IFACE_X21D;
+ ifs->type = IF_IFACE_X21D;
break;
case X21:
default:
- ifr->ifr_settings.type = IF_IFACE_X21;
+ ifs->type = IF_IFACE_X21;
break;
}
- if (ifr->ifr_settings.size == 0)
+ if (!ifs->size)
return 0; /* only type requested */
- if (ifr->ifr_settings.size < sizeof(sync))
+ if (ifs->size < sizeof(sync))
return -ENOMEM;
i = port->index;
@@ -1901,15 +1900,15 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync)))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync)))
return -EFAULT;
- ifr->ifr_settings.size = sizeof(sync);
+ ifs->size = sizeof(sync);
return 0;
}
static int
-fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+fst_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct fst_card_info *card;
struct fst_port_info *port;
@@ -1918,7 +1917,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned long flags;
void *buf;
- dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
+ dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, data);
port = dev_to_port(dev);
card = port->card;
@@ -1942,11 +1941,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* First copy in the header with the length and offset of data
* to write
*/
- if (!ifr->ifr_data)
+ if (!data)
return -EINVAL;
- if (copy_from_user(&wrthdr, ifr->ifr_data,
- sizeof(struct fstioc_write)))
+ if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write)))
return -EFAULT;
/* Sanity check the parameters. We don't support partial writes
@@ -1958,7 +1956,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Now copy the data to the card. */
- buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
+ buf = memdup_user(data + sizeof(struct fstioc_write),
wrthdr.size);
if (IS_ERR(buf))
return PTR_ERR(buf);
@@ -1991,12 +1989,12 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
- if (!ifr->ifr_data)
+ if (!data)
return -EINVAL;
gather_conf_info(card, port, &info);
- if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
+ if (copy_to_user(data, &info, sizeof(info)))
return -EFAULT;
return 0;
@@ -2011,46 +2009,58 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
card->card_no, card->state);
return -EIO;
}
- if (copy_from_user(&info, ifr->ifr_data, sizeof(info)))
+ if (copy_from_user(&info, data, sizeof(info)))
return -EFAULT;
return set_conf_from_info(card, port, &info);
+ default:
+ return -EINVAL;
+ }
+}
- case SIOCWANDEV:
- switch (ifr->ifr_settings.type) {
- case IF_GET_IFACE:
- return fst_get_iface(card, port, ifr);
-
- case IF_IFACE_SYNC_SERIAL:
- case IF_IFACE_V35:
- case IF_IFACE_V24:
- case IF_IFACE_X21:
- case IF_IFACE_X21D:
- case IF_IFACE_T1:
- case IF_IFACE_E1:
- return fst_set_iface(card, port, ifr);
-
- case IF_PROTO_RAW:
- port->mode = FST_RAW;
- return 0;
+static int
+fst_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ struct fst_card_info *card;
+ struct fst_port_info *port;
- case IF_GET_PROTO:
- if (port->mode == FST_RAW) {
- ifr->ifr_settings.type = IF_PROTO_RAW;
- return 0;
- }
- return hdlc_ioctl(dev, ifr, cmd);
+ dbg(DBG_IOCTL, "SIOCDEVPRIVATE, %x\n", ifs->type);
- default:
- port->mode = FST_GEN_HDLC;
- dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
- ifr->ifr_settings.type);
- return hdlc_ioctl(dev, ifr, cmd);
+ port = dev_to_port(dev);
+ card = port->card;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ return fst_get_iface(card, port, ifs);
+
+ case IF_IFACE_SYNC_SERIAL:
+ case IF_IFACE_V35:
+ case IF_IFACE_V24:
+ case IF_IFACE_X21:
+ case IF_IFACE_X21D:
+ case IF_IFACE_T1:
+ case IF_IFACE_E1:
+ return fst_set_iface(card, port, ifs);
+
+ case IF_PROTO_RAW:
+ port->mode = FST_RAW;
+ return 0;
+
+ case IF_GET_PROTO:
+ if (port->mode == FST_RAW) {
+ ifs->type = IF_PROTO_RAW;
+ return 0;
}
+ return hdlc_ioctl(dev, ifs);
default:
- /* Not one of ours. Pass through to HDLC package */
- return hdlc_ioctl(dev, ifr, cmd);
+ port->mode = FST_GEN_HDLC;
+ dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
+ ifs->type);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -2310,7 +2320,8 @@ static const struct net_device_ops fst_ops = {
.ndo_open = fst_open,
.ndo_stop = fst_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = fst_ioctl,
+ .ndo_siocwandev = fst_ioctl,
+ .ndo_siocdevprivate = fst_siocdevprivate,
.ndo_tx_timeout = fst_tx_timeout,
};
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 39f05fabbfa4..cda1b4ce6b21 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -674,31 +674,28 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(te1_settings);
te1_settings line;
struct ucc_hdlc_private *priv = netdev_priv(dev);
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_E1;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_E1;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = priv->clocking;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1053,7 +1050,7 @@ static const struct net_device_ops uhdlc_ops = {
.ndo_open = uhdlc_open,
.ndo_stop = uhdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = uhdlc_ioctl,
+ .ndo_siocwandev = uhdlc_ioctl,
.ndo_tx_timeout = uhdlc_tx_timeout,
};
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index dd6312b69861..cbed10b1d862 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -196,16 +196,13 @@ void hdlc_close(struct net_device *dev)
}
EXPORT_SYMBOL(hdlc_close);
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct hdlc_proto *proto = first_proto;
int result;
- if (cmd != SIOCWANDEV)
- return -EINVAL;
-
if (dev_to_hdlc(dev)->proto) {
- result = dev_to_hdlc(dev)->proto->ioctl(dev, ifr);
+ result = dev_to_hdlc(dev)->proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
}
@@ -213,7 +210,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Not handled by currently attached protocol (if any) */
while (proto) {
- result = proto->ioctl(dev, ifr);
+ result = proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
proto = proto->next;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index c54fdae950fb..cdebe65a7e2d 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -56,7 +56,7 @@ struct cisco_state {
u32 rxseq; /* RX sequence number */
};
-static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct cisco_state *state(hdlc_device *hdlc)
{
@@ -306,21 +306,21 @@ static const struct header_ops cisco_header_ops = {
.create = cisco_hard_header,
};
-static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
+ cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
const size_t size = sizeof(cisco_proto);
cisco_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_CISCO;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_CISCO;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 25e3564ce118..7637edce443e 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -146,7 +146,7 @@ struct frad_state {
u8 rxseq; /* RX sequence number */
};
-static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int fr_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline u16 q922_to_dlci(u8 *hdr)
{
@@ -357,26 +357,26 @@ static int pvc_close(struct net_device *dev)
return 0;
}
-static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pvc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct pvc_device *pvc = dev->ml_priv;
fr_proto_pvc_info info;
- if (ifr->ifr_settings.type == IF_GET_PROTO) {
+ if (ifs->type == IF_GET_PROTO) {
if (dev->type == ARPHRD_ETHER)
- ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
+ ifs->type = IF_PROTO_FR_ETH_PVC;
else
- ifr->ifr_settings.type = IF_PROTO_FR_PVC;
+ ifs->type = IF_PROTO_FR_PVC;
- if (ifr->ifr_settings.size < sizeof(info)) {
+ if (ifs->size < sizeof(info)) {
/* data size wanted */
- ifr->ifr_settings.size = sizeof(info);
+ ifs->size = sizeof(info);
return -ENOBUFS;
}
info.dlci = pvc->dlci;
memcpy(info.master, pvc->frad->name, IFNAMSIZ);
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
+ if (copy_to_user(ifs->ifs_ifsu.fr_pvc_info,
&info, sizeof(info)))
return -EFAULT;
return 0;
@@ -1056,7 +1056,7 @@ static const struct net_device_ops pvc_ops = {
.ndo_open = pvc_open,
.ndo_stop = pvc_close,
.ndo_start_xmit = pvc_xmit,
- .ndo_do_ioctl = pvc_ioctl,
+ .ndo_siocwandev = pvc_ioctl,
};
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
@@ -1179,22 +1179,22 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int fr_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
+ fr_proto __user *fr_s = ifs->ifs_ifsu.fr;
const size_t size = sizeof(fr_proto);
fr_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
fr_proto_pvc pvc;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_FR;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_FR;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(fr_s, &state(hdlc)->settings, size))
@@ -1256,21 +1256,21 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
+ if (copy_from_user(&pvc, ifs->ifs_ifsu.fr_pvc,
sizeof(fr_proto_pvc)))
return -EFAULT;
if (pvc.dlci <= 0 || pvc.dlci >= 1024)
return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
- if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
- ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
+ if (ifs->type == IF_PROTO_FR_ADD_ETH_PVC ||
+ ifs->type == IF_PROTO_FR_DEL_ETH_PVC)
result = ARPHRD_ETHER; /* bridged Ethernet device */
else
result = ARPHRD_DLCI;
- if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
- ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
+ if (ifs->type == IF_PROTO_FR_ADD_PVC ||
+ ifs->type == IF_PROTO_FR_ADD_ETH_PVC)
return fr_add_pvc(dev, pvc.dlci, result);
else
return fr_del_pvc(hdlc, pvc.dlci, result);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81ecf432a0c..37a3c989cba1 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -100,7 +100,7 @@ static const char *const event_names[EVENTS] = {
static struct sk_buff_head tx_queue; /* used when holding the spin lock */
-static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct ppp *get_ppp(struct net_device *dev)
{
@@ -655,17 +655,17 @@ static const struct header_ops ppp_header_ops = {
.create = ppp_hard_header,
};
-static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ppp *ppp;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_PPP;
+ ifs->type = IF_PROTO_PPP;
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_PPP:
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 54d28496fefd..4a2f068721bc 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -19,7 +19,7 @@
#include <linux/skbuff.h>
-static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int raw_ioctl(struct net_device *dev, struct if_settings *ifs);
static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -33,21 +33,21 @@ static struct hdlc_proto proto = {
};
-static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int raw_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_HDLC;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_HDLC;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 927596276a07..0a66b7356405 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -20,7 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs);
static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
{
@@ -48,22 +48,22 @@ static struct hdlc_proto proto = {
};
-static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
unsigned int old_qlen;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_HDLC_ETH;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 9b7ebf8bd85c..f72c92c24003 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -29,7 +29,7 @@ struct x25_state {
struct tasklet_struct rx_tasklet;
};
-static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int x25_ioctl(struct net_device *dev, struct if_settings *ifs);
static struct x25_state *state(hdlc_device *hdlc)
{
@@ -274,21 +274,21 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int x25_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
+ x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25;
const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
x25_hdlc_proto new_settings;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_X25;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_X25;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(x25_s, &state(hdlc)->settings, size))
@@ -303,7 +303,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EBUSY;
/* backward compatibility */
- if (ifr->ifr_settings.size == 0) {
+ if (ifs->size == 0) {
new_settings.dce = 0;
new_settings.modulo = 8;
new_settings.window = 7;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index fd61a7cc4fdf..15a754310fd7 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -142,11 +142,6 @@ static int hostess_close(struct net_device *d)
return 0;
}
-static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
-{
- return hdlc_ioctl(d, ifr, cmd);
-}
-
/* Passed network frames, fire them downwind.
*/
@@ -171,7 +166,7 @@ static const struct net_device_ops hostess_ops = {
.ndo_open = hostess_open,
.ndo_stop = hostess_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hostess_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
};
static struct z8530_dev *sv11_init(int iobase, int irq)
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 3c51ab239fb2..88a36a069311 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -975,11 +975,10 @@ static int init_hdlc_queues(struct port *port)
return -ENOMEM;
}
- port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys);
if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
@@ -1255,23 +1254,20 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
}
}
-static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct port *port = dev_to_port(dev);
unsigned long flags;
int clk;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_V35;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_V35;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&new_line, 0, sizeof(new_line));
@@ -1324,7 +1320,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1336,7 +1332,7 @@ static const struct net_device_ops hss_hdlc_ops = {
.ndo_open = hss_hdlc_open,
.ndo_stop = hss_hdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hss_hdlc_ioctl,
+ .ndo_siocwandev = hss_hdlc_ioctl,
};
static int hss_init_one(struct platform_device *pdev)
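Aside from the ioctl rework, the init_hdlc_queues() hunk above also drops an open-coded memset: dma_pool_zalloc() hands back an already-zeroed block, so the single call covers the two-step sequence it replaces. A sketch of the old equivalent, for illustration only:

	/* old sequence that dma_pool_zalloc() now covers in one call */
	port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
	if (port->desc_tab)
		memset(port->desc_tab, 0, POOL_ALLOC_SIZE);	/* pool block size */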
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 3bd541c868d5..d7d59b4595f9 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -19,7 +19,7 @@ void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
extern lmc_media_t lmc_ds3_media;
extern lmc_media_t lmc_ssi_media;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6c163db52835..ed687bf6ec47 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -105,7 +105,8 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
* linux reserves 16 device specific IOCTLs. We call them
* LMCIOC* to control various bits of our world.
*/
-int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
+static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd) /*fold00*/
{
lmc_softc_t *sc = dev_to_sc(dev);
lmc_ctl_t ctl;
@@ -124,7 +125,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
* To date internally, just copy this out to the user.
*/
case LMCIOCGINFO: /*fold01*/
- if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+ if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
ret = -EFAULT;
else
ret = 0;
@@ -141,7 +142,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+ if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
@@ -171,7 +172,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
+ if (copy_from_user(&new_type, data, sizeof(u16))) {
ret = -EFAULT;
break;
}
@@ -211,8 +212,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
- if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
- sizeof(struct lmc_xinfo)))
+ if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
ret = -EFAULT;
else
ret = 0;
@@ -245,9 +245,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
regVal & T1FRAMER_SEF_MASK;
}
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
+ if (copy_to_user(data, &sc->lmc_device->stats,
sizeof(sc->lmc_device->stats)) ||
- copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
+ copy_to_user(data + sizeof(sc->lmc_device->stats),
&sc->extra_stats, sizeof(sc->extra_stats)))
ret = -EFAULT;
else
@@ -282,7 +282,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+ if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
@@ -314,11 +314,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
#ifdef DEBUG
case LMCIOCDUMPEVENTLOG:
- if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+ if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
ret = -EFAULT;
break;
}
- if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
+ if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
sizeof(lmcEventLogBuf)))
ret = -EFAULT;
else
@@ -346,7 +346,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
*/
netif_stop_queue(dev);
- if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+ if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
ret = -EFAULT;
break;
}
@@ -609,10 +609,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
}
break;
- default: /*fold01*/
- /* If we don't know what to do, give the protocol a shot. */
- ret = lmc_proto_ioctl (sc, ifr, cmd);
- break;
+ default:
+ break;
}
return ret;
@@ -788,7 +786,8 @@ static const struct net_device_ops lmc_ops = {
.ndo_open = lmc_open,
.ndo_stop = lmc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = lmc_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
+ .ndo_siocdevprivate = lmc_siocdevprivate,
.ndo_tx_timeout = lmc_driver_timeout,
.ndo_get_stats = lmc_get_stats,
};
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 4e9cc83b615a..e5487616a816 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -58,13 +58,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
}
}
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
-{
- if (sc->if_type == LMC_PPP)
- return hdlc_ioctl(sc->lmc_device, ifr, cmd);
- return -EOPNOTSUPP;
-}
-
int lmc_proto_open(lmc_softc_t *sc)
{
int ret = 0;
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index bb098e443776..e56e7072de44 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -5,7 +5,6 @@
#include <linux/hdlc.h>
void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
int lmc_proto_open(lmc_softc_t *sc);
void lmc_proto_close(lmc_softc_t *sc);
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index bdb6dc2409bc..f3e80722ba1d 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -227,27 +227,30 @@ static int n2_close(struct net_device *dev)
return 0;
}
-static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int n2_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int n2_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -275,7 +278,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -311,7 +314,8 @@ static const struct net_device_ops n2_ops = {
.ndo_open = n2_open,
.ndo_stop = n2_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = n2_ioctl,
+ .ndo_siocwandev = n2_ioctl,
+ .ndo_siocdevprivate = n2_siocdevprivate,
};
static int __init n2_run(unsigned long io, unsigned long irq,
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 7b123a771aa6..4766446f0fa0 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -174,27 +174,30 @@ static int pc300_close(struct net_device *dev)
return 0;
}
-static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pc300_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- int new_type;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int pc300_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ int new_type;
+ port_t *port = dev_to_port(dev);
- if (ifr->ifr_settings.type == IF_GET_IFACE) {
- ifr->ifr_settings.type = port->iface;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ if (ifs->type == IF_GET_IFACE) {
+ ifs->type = port->iface;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -203,21 +206,21 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
if (port->card->type == PC300_X21 &&
- (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
- ifr->ifr_settings.type == IF_IFACE_X21))
+ (ifs->type == IF_IFACE_SYNC_SERIAL ||
+ ifs->type == IF_IFACE_X21))
new_type = IF_IFACE_X21;
else if (port->card->type == PC300_RSV &&
- (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
- ifr->ifr_settings.type == IF_IFACE_V35))
+ (ifs->type == IF_IFACE_SYNC_SERIAL ||
+ ifs->type == IF_IFACE_V35))
new_type = IF_IFACE_V35;
else if (port->card->type == PC300_RSV &&
- ifr->ifr_settings.type == IF_IFACE_V24)
+ ifs->type == IF_IFACE_V24)
new_type = IF_IFACE_V24;
else
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -272,7 +275,8 @@ static const struct net_device_ops pc300_ops = {
.ndo_open = pc300_open,
.ndo_stop = pc300_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = pc300_ioctl,
+ .ndo_siocwandev = pc300_ioctl,
+ .ndo_siocdevprivate = pc300_siocdevprivate,
};
static int pc300_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index dee9c4e15eca..ea86c7035653 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -167,27 +167,30 @@ static int pci200_close(struct net_device *dev)
return 0;
}
-static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pci200_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int pci200_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_V35;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_V35;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -217,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -253,7 +256,8 @@ static const struct net_device_ops pci200_ops = {
.ndo_open = pci200_open,
.ndo_stop = pci200_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = pci200_ioctl,
+ .ndo_siocwandev = pci200_ioctl,
+ .ndo_siocdevprivate = pci200_siocdevprivate,
};
static int pci200_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 3092a09d3eaa..469fe979d664 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -119,7 +119,8 @@ static int sbni_open( struct net_device * );
static int sbni_close( struct net_device * );
static netdev_tx_t sbni_start_xmit(struct sk_buff *,
struct net_device * );
-static int sbni_ioctl( struct net_device *, struct ifreq *, int );
+static int sbni_siocdevprivate(struct net_device *, struct ifreq *,
+ void __user *, int);
static void set_multicast_list( struct net_device * );
static irqreturn_t sbni_interrupt( int, void * );
@@ -211,7 +212,7 @@ static const struct net_device_ops sbni_netdev_ops = {
.ndo_stop = sbni_close,
.ndo_start_xmit = sbni_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = sbni_ioctl,
+ .ndo_siocdevprivate = sbni_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1297,7 +1298,7 @@ sbni_card_probe( unsigned long ioaddr )
/* -------------------------------------------------------------------------- */
static int
-sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
+sbni_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct net_local *nl = netdev_priv(dev);
struct sbni_flags flags;
@@ -1310,8 +1311,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
switch( cmd ) {
case SIOCDEVGETINSTATS :
- if (copy_to_user( ifr->ifr_data, &nl->in_stats,
- sizeof(struct sbni_in_stats) ))
+ if (copy_to_user(data, &nl->in_stats,
+ sizeof(struct sbni_in_stats)))
error = -EFAULT;
break;
@@ -1328,7 +1329,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
flags.rxl = nl->cur_rxl_index;
flags.fixed_rxl = nl->delta_rxl == 0;
- if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
+ if (copy_to_user(data, &flags, sizeof(flags)))
error = -EFAULT;
break;
@@ -1358,7 +1359,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
+ if (copy_from_user(slave_name, data, sizeof(slave_name)))
return -EFAULT;
slave_dev = dev_get_by_name(&init_net, slave_name );
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4403e219ca03..eddd20aab691 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -124,14 +124,6 @@ static int sealevel_close(struct net_device *d)
return 0;
}
-static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
-{
- /* struct slvl_device *slvl=dev_to_chan(d);
- * z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd)
- */
- return hdlc_ioctl(d, ifr, cmd);
-}
-
/* Passed network frames, fire them downwind. */
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
@@ -152,7 +144,7 @@ static const struct net_device_ops sealevel_ops = {
.ndo_open = sealevel_open,
.ndo_stop = sealevel_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = sealevel_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
};
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index f22e48415e6f..5a9e262188ef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -343,20 +343,17 @@ static int wanxl_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
struct port *port = dev_to_port(dev);
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
@@ -364,7 +361,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
line.clock_rate = 0;
line.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
@@ -374,7 +371,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
+ if (copy_from_user(&line, ifs->ifs_ifsu.sync,
size))
return -EFAULT;
@@ -389,7 +386,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -545,7 +542,7 @@ static const struct net_device_ops wanxl_ops = {
.ndo_open = wanxl_open,
.ndo_stop = wanxl_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = wanxl_ioctl,
+ .ndo_siocwandev = wanxl_ioctl,
.ndo_get_stats = wanxl_get_stats,
};
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index fd37d4d2983b..65dd8cff1b01 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1144,7 +1144,7 @@ static int waitbusy(struct airo_info *ai);
static irqreturn_t airo_interrupt(int irq, void* dev_id);
static int airo_thread(void *data);
static void timer_func(struct net_device *dev);
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *, int cmd);
static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
@@ -2664,7 +2664,7 @@ static const struct net_device_ops airo11_netdev_ops = {
.ndo_start_xmit = airo_start_xmit11,
.ndo_get_stats = airo_get_stats,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
};
static void wifi_setup(struct net_device *dev)
@@ -2764,7 +2764,7 @@ static const struct net_device_ops airo_netdev_ops = {
.ndo_get_stats = airo_get_stats,
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
.ndo_validate_addr = eth_validate_addr,
};
@@ -2775,7 +2775,7 @@ static const struct net_device_ops mpi_netdev_ops = {
.ndo_get_stats = airo_get_stats,
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
.ndo_validate_addr = eth_validate_addr,
};
@@ -7661,7 +7661,8 @@ static const struct iw_handler_def airo_handler_def =
* Javier Achirica did a great job of merging code from the unnamed CISCO
* developer that added support for flashing the card.
*/
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
int rc = 0;
struct airo_info *ai = dev->ml_priv;
@@ -7678,7 +7679,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int val = AIROMAGIC;
aironet_ioctl com;
- if (copy_from_user(&com, rq->ifr_data, sizeof(com)))
+ if (copy_from_user(&com, data, sizeof(com)))
rc = -EFAULT;
else if (copy_to_user(com.data, (char *)&val, sizeof(val)))
rc = -EFAULT;
@@ -7694,7 +7695,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*/
{
aironet_ioctl com;
- if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
+ if (copy_from_user(&com, data, sizeof(com))) {
rc = -EFAULT;
break;
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
index c4b81ff7d7e4..c17ab6dbbb53 100644
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap.h
@@ -93,6 +93,7 @@ extern const struct iw_handler_def hostap_iw_handler_def;
extern const struct ethtool_ops prism2_ethtool_ops;
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-
+int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 49766b285230..0a376f112db9 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3941,7 +3941,8 @@ const struct iw_handler_def hostap_iw_handler_def =
.get_wireless_stats = hostap_get_wireless_stats,
};
-
+/* Private ioctls (iwpriv) that have not yet been converted
+ * into new wireless extensions API */
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct iwreq *wrq = (struct iwreq *) ifr;
@@ -3953,9 +3954,6 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
local = iface->local;
switch (cmd) {
- /* Private ioctls (iwpriv) that have not yet been converted
- * into new wireless extensions API */
-
case PRISM2_IOCTL_INQUIRE:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name);
@@ -4009,11 +4007,31 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.ap_addr.sa_data);
break;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+/* Private ioctls that are not used with iwpriv;
+ * in SIOCDEVPRIVATE range */
+int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)ifr;
+ struct hostap_interface *iface;
+ local_info_t *local;
+ int ret = 0;
- /* Private ioctls that are not used with iwpriv;
- * in SIOCDEVPRIVATE range */
+ iface = netdev_priv(dev);
+ local = iface->local;
+
+ if (in_compat_syscall()) /* not implemented yet */
+ return -EOPNOTSUPP;
+ switch (cmd) {
#ifdef PRISM2_DOWNLOAD_SUPPORT
case PRISM2_IOCTL_DOWNLOAD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index de97b3304115..54f67b682b6a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -797,6 +797,7 @@ static const struct net_device_ops hostap_netdev_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
@@ -809,6 +810,7 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
@@ -821,6 +823,7 @@ static const struct net_device_ops hostap_master_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 7f7d364d3a51..2fe88b8be348 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -479,6 +479,7 @@ static struct pci_driver iosm_ipc_driver = {
},
.id_table = iosm_ipc_ids,
};
+module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
@@ -560,21 +561,3 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
-
-static int __init iosm_ipc_driver_init(void)
-{
- if (pci_register_driver(&iosm_ipc_driver)) {
- pr_err("registering of IOSM PCIe driver failed");
- return -1;
- }
-
- return 0;
-}
-
-static void __exit iosm_ipc_driver_exit(void)
-{
- pci_unregister_driver(&iosm_ipc_driver);
-}
-
-module_init(iosm_ipc_driver_init);
-module_exit(iosm_ipc_driver_exit);
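The iosm change swaps the hand-rolled init/exit pair for the module_pci_driver() helper. Roughly, that macro generates the same boilerplate the patch removes, with the difference that the real return value of pci_register_driver() is propagated instead of -1; an approximate expansion (for illustration, the actual macro lives in <linux/pci.h>):

	static int __init iosm_ipc_driver_init(void)
	{
		return pci_register_driver(&iosm_ipc_driver);
	}
	module_init(iosm_ipc_driver_init);

	static void __exit iosm_ipc_driver_exit(void)
	{
		pci_unregister_driver(&iosm_ipc_driver);
	}
	module_exit(iosm_ipc_driver_exit);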
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 528745862738..c6b3334f24c9 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -38,7 +38,7 @@
#define NCI_OP_PROP_SET_PDATA_OID 0x23
struct fdp_nci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
struct fdp_i2c_phy *phy;
struct nci_dev *ndev;
@@ -52,7 +52,7 @@ struct fdp_nci_info {
u32 limited_otp_version;
u8 key_index;
- u8 *fw_vsc_cfg;
+ const u8 *fw_vsc_cfg;
u8 clock_type;
u32 clock_freq;
@@ -65,7 +65,7 @@ struct fdp_nci_info {
wait_queue_head_t setup_wq;
};
-static u8 nci_core_get_config_otp_ram_version[5] = {
+static const u8 nci_core_get_config_otp_ram_version[5] = {
0x04,
NCI_PARAM_ID_FW_RAM_VERSION,
NCI_PARAM_ID_FW_OTP_VERSION,
@@ -111,7 +111,7 @@ static inline int fdp_nci_patch_cmd(struct nci_dev *ndev, u8 type)
}
static inline int fdp_nci_set_production_data(struct nci_dev *ndev, u8 len,
- char *data)
+ const char *data)
{
return nci_prop_cmd(ndev, NCI_OP_PROP_SET_PDATA_OID, len, data);
}
@@ -236,7 +236,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
static int fdp_nci_open(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
+ const struct fdp_nci_info *info = nci_get_drvdata(ndev);
return info->phy_ops->enable(info->phy);
}
@@ -260,7 +260,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- u8 *data;
+ const u8 *data;
int r;
r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev);
@@ -269,15 +269,15 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
return r;
}
- data = (u8 *) info->ram_patch->data;
+ data = info->ram_patch->data;
info->ram_patch_version =
data[FDP_FW_HEADER_SIZE] |
(data[FDP_FW_HEADER_SIZE + 1] << 8) |
(data[FDP_FW_HEADER_SIZE + 2] << 16) |
(data[FDP_FW_HEADER_SIZE + 3] << 24);
- dev_dbg(dev, "RAM patch version: %d, size: %d\n",
- info->ram_patch_version, (int) info->ram_patch->size);
+ dev_dbg(dev, "RAM patch version: %d, size: %zu\n",
+ info->ram_patch_version, info->ram_patch->size);
r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev);
@@ -293,8 +293,8 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
(data[FDP_FW_HEADER_SIZE+2] << 16) |
(data[FDP_FW_HEADER_SIZE+3] << 24);
- dev_dbg(dev, "OTP patch version: %d, size: %d\n",
- info->otp_patch_version, (int) info->otp_patch->size);
+ dev_dbg(dev, "OTP patch version: %d, size: %zu\n",
+ info->otp_patch_version, info->otp_patch->size);
return 0;
}
@@ -610,8 +610,9 @@ static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev,
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- struct nci_core_get_config_rsp *rsp = (void *) skb->data;
- u8 i, *p;
+ const struct nci_core_get_config_rsp *rsp = (void *) skb->data;
+ unsigned int i;
+ const u8 *p;
if (rsp->status == NCI_STATUS_OK) {
@@ -651,7 +652,7 @@ static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev,
return 0;
}
-static struct nci_driver_ops fdp_core_ops[] = {
+static const struct nci_driver_ops fdp_core_ops[] = {
{
.opcode = NCI_OP_CORE_GET_CONFIG_RSP,
.rsp = fdp_nci_core_get_config_rsp_packet,
@@ -662,7 +663,7 @@ static struct nci_driver_ops fdp_core_ops[] = {
},
};
-static struct nci_driver_ops fdp_prop_ops[] = {
+static const struct nci_driver_ops fdp_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROP, NCI_OP_PROP_PATCH_OID),
.rsp = fdp_nci_prop_patch_rsp_packet,
@@ -675,7 +676,7 @@ static struct nci_driver_ops fdp_prop_ops[] = {
},
};
-static struct nci_ops nci_ops = {
+static const struct nci_ops nci_ops = {
.open = fdp_nci_open,
.close = fdp_nci_close,
.send = fdp_nci_send,
@@ -687,10 +688,10 @@ static struct nci_ops nci_ops = {
.n_core_ops = ARRAY_SIZE(fdp_core_ops),
};
-int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+int fdp_nci_probe(struct fdp_i2c_phy *phy, const struct nfc_phy_ops *phy_ops,
struct nci_dev **ndevp, int tx_headroom,
int tx_tailroom, u8 clock_type, u32 clock_freq,
- u8 *fw_vsc_cfg)
+ const u8 *fw_vsc_cfg)
{
struct device *dev = &phy->i2c_dev->dev;
struct fdp_nci_info *info;
@@ -718,6 +719,7 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
NFC_PROTO_NFC_DEP_MASK |
NFC_PROTO_ISO15693_MASK;
+ BUILD_BUG_ON(ARRAY_SIZE(fdp_prop_ops) > NCI_MAX_PROPRIETARY_CMD);
ndev = nci_allocate_device(&nci_ops, protocols, tx_headroom,
tx_tailroom);
if (!ndev) {
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
index ead3b21ccae6..2e9161a4d7bf 100644
--- a/drivers/nfc/fdp/fdp.h
+++ b/drivers/nfc/fdp/fdp.h
@@ -21,9 +21,9 @@ struct fdp_i2c_phy {
uint16_t next_read_size;
};
-int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+int fdp_nci_probe(struct fdp_i2c_phy *phy, const struct nfc_phy_ops *phy_ops,
struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
- u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
+ u8 clock_type, u32 clock_freq, const u8 *fw_vsc_cfg);
void fdp_nci_remove(struct nci_dev *ndev);
#endif /* __LOCAL_FDP_H_ */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index c5596e514648..051c43a2a52f 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -36,7 +36,7 @@
print_hex_dump(KERN_DEBUG, prefix": ", DUMP_PREFIX_OFFSET, \
16, 1, (skb)->data, (skb)->len, 0)
-static void fdp_nci_i2c_reset(struct fdp_i2c_phy *phy)
+static void fdp_nci_i2c_reset(const struct fdp_i2c_phy *phy)
{
/* Reset RST/WakeUP for at least 100 micro-second */
gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_OFF);
@@ -47,7 +47,7 @@ static void fdp_nci_i2c_reset(struct fdp_i2c_phy *phy)
static int fdp_nci_i2c_enable(void *phy_id)
{
- struct fdp_i2c_phy *phy = phy_id;
+ const struct fdp_i2c_phy *phy = phy_id;
fdp_nci_i2c_reset(phy);
@@ -56,7 +56,7 @@ static int fdp_nci_i2c_enable(void *phy_id)
static void fdp_nci_i2c_disable(void *phy_id)
{
- struct fdp_i2c_phy *phy = phy_id;
+ const struct fdp_i2c_phy *phy = phy_id;
fdp_nci_i2c_reset(phy);
}
@@ -120,7 +120,7 @@ static int fdp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
return r;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = fdp_nci_i2c_write,
.enable = fdp_nci_i2c_enable,
.disable = fdp_nci_i2c_disable,
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index e56cea716cd2..f9cca885beec 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -202,7 +202,7 @@ err:
return r;
}
-static int mei_nfc_send(struct nfc_mei_phy *phy, u8 *buf, size_t length)
+static int mei_nfc_send(struct nfc_mei_phy *phy, const u8 *buf, size_t length)
{
struct mei_nfc_hdr *hdr;
u8 *mei_buf;
@@ -362,7 +362,7 @@ static void nfc_mei_phy_disable(void *phy_id)
phy->powered = 0;
}
-struct nfc_phy_ops mei_phy_ops = {
+const struct nfc_phy_ops mei_phy_ops = {
.write = nfc_mei_phy_write,
.enable = nfc_mei_phy_enable,
.disable = nfc_mei_phy_disable,
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index 51bd44f5f3b8..2b1edb3eba15 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -45,7 +45,7 @@ struct nfc_mei_phy {
int hard_fault;
};
-extern struct nfc_phy_ops mei_phy_ops;
+extern const struct nfc_phy_ops mei_phy_ops;
struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device);
void nfc_mei_phy_free(struct nfc_mei_phy *phy);
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index dd78d987e6c9..86f593c73ed6 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -73,7 +73,7 @@ static void microread_i2c_remove_len_crc(struct sk_buff *skb)
skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
}
-static int check_crc(struct sk_buff *skb)
+static int check_crc(const struct sk_buff *skb)
{
int i;
u8 crc = 0;
@@ -225,7 +225,7 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = microread_i2c_write,
.enable = microread_i2c_enable,
.disable = microread_i2c_disable,
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index b1d3975e8a81..9d83ccebd434 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -131,7 +131,7 @@
#define MICROREAD_ELT_ID_SE2 0x04
#define MICROREAD_ELT_ID_SE3 0x05
-static struct nfc_hci_gate microread_gates[] = {
+static const struct nfc_hci_gate microread_gates[] = {
{MICROREAD_GATE_ID_ADM, MICROREAD_PIPE_ID_ADMIN},
{MICROREAD_GATE_ID_LOOPBACK, MICROREAD_PIPE_ID_HDS_LOOPBACK},
{MICROREAD_GATE_ID_IDT, MICROREAD_PIPE_ID_HDS_IDT},
@@ -152,7 +152,7 @@ static struct nfc_hci_gate microread_gates[] = {
#define MICROREAD_CMD_TAILROOM 2
struct microread_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -358,7 +358,7 @@ static int microread_complete_target_discovered(struct nfc_hci_dev *hdev,
static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
int err)
{
- struct microread_info *info = context;
+ const struct microread_info *info = context;
switch (info->async_cb_type) {
case MICROREAD_CB_TYPE_READER_ALL:
@@ -625,7 +625,7 @@ static int microread_event_received(struct nfc_hci_dev *hdev, u8 pipe,
return r;
}
-static struct nfc_hci_ops microread_hci_ops = {
+static const struct nfc_hci_ops microread_hci_ops = {
.open = microread_open,
.close = microread_close,
.hci_ready = microread_hci_ready,
@@ -641,9 +641,9 @@ static struct nfc_hci_ops microread_hci_ops = {
.event_received = microread_event_received,
};
-int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev)
+int microread_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ const char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev)
{
struct microread_info *info;
unsigned long quirks = 0;
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
index 044f5e456375..2ee7ccfa22dd 100644
--- a/drivers/nfc/microread/microread.h
+++ b/drivers/nfc/microread/microread.h
@@ -10,9 +10,9 @@
#define DRIVER_DESC "NFC driver for microread"
-int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev);
+int microread_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ const char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev);
void microread_remove(struct nfc_hci_dev *hdev);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index aaccb8b76b3e..edac56b01fd1 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -129,7 +129,7 @@ static void fw_dnld_timeout(struct timer_list *t)
}
static int process_state_reset(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_reset_ntf) != skb->len ||
memcmp(skb->data, nci_pattern_core_reset_ntf,
@@ -145,7 +145,8 @@ static int process_state_reset(struct nfcmrvl_private *priv,
return 0;
}
-static int process_state_init(struct nfcmrvl_private *priv, struct sk_buff *skb)
+static int process_state_init(struct nfcmrvl_private *priv,
+ const struct sk_buff *skb)
{
struct nci_core_set_config_cmd cmd;
@@ -175,7 +176,7 @@ static void create_lc(struct nfcmrvl_private *priv)
}
static int process_state_set_ref_clock(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct nci_core_set_config_cmd cmd;
@@ -221,7 +222,7 @@ static int process_state_set_ref_clock(struct nfcmrvl_private *priv,
}
static int process_state_set_hi_config(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
@@ -232,7 +233,7 @@ static int process_state_set_hi_config(struct nfcmrvl_private *priv,
}
static int process_state_open_lc(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_conn_create_rsp) >= skb->len ||
memcmp(skb->data, nci_pattern_core_conn_create_rsp,
@@ -347,7 +348,7 @@ static int process_state_fw_dnld(struct nfcmrvl_private *priv,
}
static int process_state_close_lc(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_conn_close_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_core_conn_close_rsp, skb->len))
@@ -358,7 +359,8 @@ static int process_state_close_lc(struct nfcmrvl_private *priv,
return 0;
}
-static int process_state_boot(struct nfcmrvl_private *priv, struct sk_buff *skb)
+static int process_state_boot(struct nfcmrvl_private *priv,
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_proprietary_boot_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_proprietary_boot_rsp, skb->len))
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 59a529e72d96..c38b228006fd 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -146,7 +146,7 @@ static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
{
}
-static struct nfcmrvl_if_ops i2c_ops = {
+static const struct nfcmrvl_if_ops i2c_ops = {
.nci_open = nfcmrvl_i2c_nci_open,
.nci_close = nfcmrvl_i2c_nci_close,
.nci_send = nfcmrvl_i2c_nci_send,
@@ -182,8 +182,8 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
static int nfcmrvl_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_i2c_drv_data *drv_data;
- struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_platform_data config;
int ret;
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index a4620b480c4f..2fcf545012b1 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -81,7 +81,7 @@ static int nfcmrvl_nci_fw_download(struct nci_dev *ndev,
return nfcmrvl_fw_dnld_start(ndev, firmware_name);
}
-static struct nci_ops nfcmrvl_nci_ops = {
+static const struct nci_ops nfcmrvl_nci_ops = {
.open = nfcmrvl_nci_open,
.close = nfcmrvl_nci_close,
.send = nfcmrvl_nci_send,
@@ -91,9 +91,9 @@ static struct nci_ops nfcmrvl_nci_ops = {
struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
void *drv_data,
- struct nfcmrvl_if_ops *ops,
+ const struct nfcmrvl_if_ops *ops,
struct device *dev,
- struct nfcmrvl_platform_data *pdata)
+ const struct nfcmrvl_platform_data *pdata)
{
struct nfcmrvl_private *priv;
int rc;
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index a715543bc9bf..165bd0a95190 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -77,7 +77,7 @@ struct nfcmrvl_private {
/* PHY type */
enum nfcmrvl_phy phy;
/* Low level driver ops */
- struct nfcmrvl_if_ops *if_ops;
+ const struct nfcmrvl_if_ops *if_ops;
};
struct nfcmrvl_if_ops {
@@ -92,9 +92,9 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
void *drv_data,
- struct nfcmrvl_if_ops *ops,
+ const struct nfcmrvl_if_ops *ops,
struct device *dev,
- struct nfcmrvl_platform_data *pdata);
+ const struct nfcmrvl_platform_data *pdata);
void nfcmrvl_chip_reset(struct nfcmrvl_private *priv);
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 66696321c645..b182ab2e03c0 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -99,7 +99,7 @@ static void nfcmrvl_spi_nci_update_config(struct nfcmrvl_private *priv,
drv_data->nci_spi->xfer_speed_hz = config->clk;
}
-static struct nfcmrvl_if_ops spi_ops = {
+static const struct nfcmrvl_if_ops spi_ops = {
.nci_open = nfcmrvl_spi_nci_open,
.nci_close = nfcmrvl_spi_nci_close,
.nci_send = nfcmrvl_spi_nci_send,
@@ -129,7 +129,7 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
static int nfcmrvl_spi_probe(struct spi_device *spi)
{
- struct nfcmrvl_platform_data *pdata;
+ const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_platform_data config;
struct nfcmrvl_spi_drv_data *drv_data;
int ret = 0;
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 50d86c90b9dd..9c92cbdc42f0 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -49,7 +49,7 @@ static void nfcmrvl_uart_nci_update_config(struct nfcmrvl_private *priv,
config->flow_control);
}
-static struct nfcmrvl_if_ops uart_ops = {
+static const struct nfcmrvl_if_ops uart_ops = {
.nci_open = nfcmrvl_uart_nci_open,
.nci_close = nfcmrvl_uart_nci_close,
.nci_send = nfcmrvl_uart_nci_send,
@@ -98,8 +98,8 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
{
struct nfcmrvl_private *priv;
- struct nfcmrvl_platform_data *pdata = NULL;
struct nfcmrvl_platform_data config;
+ const struct nfcmrvl_platform_data *pdata = NULL;
struct device *dev = nu->tty->dev;
/*
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 9d649b45300b..a99aedff795d 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -264,7 +264,7 @@ done:
return err;
}
-static struct nfcmrvl_if_ops usb_ops = {
+static const struct nfcmrvl_if_ops usb_ops = {
.nci_open = nfcmrvl_usb_nci_open,
.nci_close = nfcmrvl_usb_nci_close,
.nci_send = nfcmrvl_usb_nci_send,
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index dd27c85190d3..85bf8d586c70 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -239,7 +239,7 @@ static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb,
static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev)
{
- struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
+ const struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
nfcsim_link_recv_cancel(dev->link_in);
}
@@ -319,7 +319,7 @@ static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
return nfcsim_send(ddev, NULL, timeout, cb, arg);
}
-static struct nfc_digital_ops nfcsim_digital_ops = {
+static const struct nfc_digital_ops nfcsim_digital_ops = {
.in_configure_hw = nfcsim_in_configure_hw,
.in_send_cmd = nfcsim_in_send_cmd,
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 2b0c7232e91f..518e2afb43a8 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -83,7 +83,7 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return r;
}
-static struct nci_ops nxp_nci_ops = {
+static const struct nci_ops nxp_nci_ops = {
.open = nxp_nci_open,
.close = nxp_nci_close,
.send = nxp_nci_send,
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index cd64bfe20402..2f3f3fe9a0ba 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2623,7 +2623,7 @@ static int pn533_dev_down(struct nfc_dev *nfc_dev)
return ret;
}
-static struct nfc_ops pn533_nfc_ops = {
+static const struct nfc_ops pn533_nfc_ops = {
.dev_up = pn533_dev_up,
.dev_down = pn533_dev_down,
.dep_link_up = pn533_dep_link_up,
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index de59e439c369..37d26f01986b 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -515,7 +515,7 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = pn544_hci_i2c_write,
.enable = pn544_hci_i2c_enable,
.disable = pn544_hci_i2c_disable,
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index b788870473e8..092f03b80a78 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -86,7 +86,7 @@ enum pn544_state {
#define PN544_HCI_CMD_ATTREQUEST 0x12
#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
-static struct nfc_hci_gate pn544_gates[] = {
+static const struct nfc_hci_gate pn544_gates[] = {
{NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
{NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
@@ -108,7 +108,7 @@ static struct nfc_hci_gate pn544_gates[] = {
#define PN544_CMDS_HEADROOM 2
struct pn544_hci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -809,7 +809,7 @@ static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
#define PN544_SE_MODE_ON 0x01
static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
{
- struct nfc_se *se;
+ const struct nfc_se *se;
u8 enable = PN544_SE_MODE_ON;
static struct uicc_gatelist {
u8 head;
@@ -864,7 +864,7 @@ static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
{
- struct nfc_se *se;
+ const struct nfc_se *se;
u8 disable = PN544_SE_MODE_OFF;
se = nfc_find_se(hdev->ndev, se_idx);
@@ -881,7 +881,7 @@ static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
}
}
-static struct nfc_hci_ops pn544_hci_ops = {
+static const struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
.hci_ready = pn544_hci_ready,
@@ -901,9 +901,10 @@ static struct nfc_hci_ops pn544_hci_ops = {
.disable_se = pn544_hci_disable_se,
};
-int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- fw_download_t fw_download, struct nfc_hci_dev **hdev)
+int pn544_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, fw_download_t fw_download,
+ struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
u32 protocols;
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 5634ba215ead..c6fe3e11e0c8 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -16,9 +16,10 @@
typedef int (*fw_download_t)(void *context, const char *firmware_name,
u8 hw_variant);
-int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- fw_download_t fw_download, struct nfc_hci_dev **hdev);
+int pn544_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, fw_download_t fw_download,
+ struct nfc_hci_dev **hdev);
void pn544_hci_remove(struct nfc_hci_dev *hdev);
#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 4df926cc37d0..517376c43b86 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -217,7 +217,7 @@ struct port100_protocol {
u8 value;
} __packed;
-static struct port100_protocol
+static const struct port100_protocol
in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
{ PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
@@ -391,7 +391,7 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
},
};
-static struct port100_protocol
+static const struct port100_protocol
tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
{ PORT100_TG_PROT_END, 0 },
@@ -526,7 +526,7 @@ static inline u8 port100_checksum(u16 value)
}
/* The rule: sum(data elements) + checksum = 0 */
-static u8 port100_data_checksum(u8 *data, int datalen)
+static u8 port100_data_checksum(const u8 *data, int datalen)
{
u8 sum = 0;
int i;
@@ -568,10 +568,10 @@ static void port100_tx_update_payload_len(void *_frame, int len)
le16_add_cpu(&frame->datalen, len);
}
-static bool port100_rx_frame_is_valid(void *_frame)
+static bool port100_rx_frame_is_valid(const void *_frame)
{
u8 checksum;
- struct port100_frame *frame = _frame;
+ const struct port100_frame *frame = _frame;
if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
@@ -589,23 +589,24 @@ static bool port100_rx_frame_is_valid(void *_frame)
return true;
}
-static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+static bool port100_rx_frame_is_ack(const struct port100_ack_frame *frame)
{
return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
}
-static inline int port100_rx_frame_size(void *frame)
+static inline int port100_rx_frame_size(const void *frame)
{
- struct port100_frame *f = frame;
+ const struct port100_frame *f = frame;
return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
PORT100_FRAME_TAIL_LEN;
}
-static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+static bool port100_rx_frame_is_cmd_response(const struct port100 *dev,
+ const void *frame)
{
- struct port100_frame *f = frame;
+ const struct port100_frame *f = frame;
return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
}
@@ -655,7 +656,8 @@ sched_wq:
schedule_work(&dev->cmd_complete_work);
}
-static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+static int port100_submit_urb_for_response(const struct port100 *dev,
+ gfp_t flags)
{
dev->in_urb->complete = port100_recv_response;
@@ -666,7 +668,7 @@ static void port100_recv_ack(struct urb *urb)
{
struct port100 *dev = urb->context;
struct port100_cmd *cmd = dev->cmd;
- struct port100_ack_frame *in_frame;
+ const struct port100_ack_frame *in_frame;
int rc;
cmd->status = urb->status;
@@ -708,7 +710,7 @@ sched_wq:
schedule_work(&dev->cmd_complete_work);
}
-static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+static int port100_submit_urb_for_ack(const struct port100 *dev, gfp_t flags)
{
dev->in_urb->complete = port100_recv_ack;
@@ -753,8 +755,9 @@ static int port100_send_ack(struct port100 *dev)
return rc;
}
-static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
- struct sk_buff *in, int in_len)
+static int port100_send_frame_async(struct port100 *dev,
+ const struct sk_buff *out,
+ const struct sk_buff *in, int in_len)
{
int rc;
@@ -960,7 +963,7 @@ static void port100_abort_cmd(struct nfc_digital_dev *ddev)
usb_kill_urb(dev->in_urb);
}
-static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+static struct sk_buff *port100_alloc_skb(const struct port100 *dev, unsigned int size)
{
struct sk_buff *skb;
@@ -1098,7 +1101,7 @@ static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
{
struct port100 *dev = nfc_digital_get_drvdata(ddev);
- struct port100_protocol *protocols;
+ const struct port100_protocol *protocols;
struct sk_buff *skb;
struct sk_buff *resp;
int num_protocols;
@@ -1152,7 +1155,7 @@ static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
struct sk_buff *resp)
{
- struct port100_cb_arg *cb_arg = arg;
+ const struct port100_cb_arg *cb_arg = arg;
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
u32 status;
int rc;
@@ -1255,7 +1258,7 @@ static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
{
struct port100 *dev = nfc_digital_get_drvdata(ddev);
- struct port100_protocol *protocols;
+ const struct port100_protocol *protocols;
struct sk_buff *skb;
struct sk_buff *resp;
int rc;
@@ -1330,7 +1333,7 @@ static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
struct sk_buff *resp)
{
u32 status;
- struct port100_cb_arg *cb_arg = arg;
+ const struct port100_cb_arg *cb_arg = arg;
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
struct port100_tg_comm_rf_res *hdr;
@@ -1453,7 +1456,7 @@ static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
- struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ const struct port100 *dev = nfc_digital_get_drvdata(ddev);
struct sk_buff *skb;
skb = port100_alloc_skb(dev, 0);
@@ -1463,7 +1466,7 @@ static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
}
-static struct nfc_digital_ops port100_digital_ops = {
+static const struct nfc_digital_ops port100_digital_ops = {
.in_configure_hw = port100_in_configure_hw,
.in_send_cmd = port100_in_send_cmd,
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index 865d3e3d1528..1c412007fabb 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -143,11 +143,13 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
return nci_core_init(info->ndev);
}
-static struct nci_ops s3fwrn5_nci_ops = {
+static const struct nci_ops s3fwrn5_nci_ops = {
.open = s3fwrn5_nci_open,
.close = s3fwrn5_nci_close,
.send = s3fwrn5_nci_send,
.post_setup = s3fwrn5_nci_post_setup,
+ .prop_ops = s3fwrn5_nci_prop_ops,
+ .n_prop_ops = ARRAY_SIZE(s3fwrn5_nci_prop_ops),
};
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
@@ -167,9 +169,6 @@ int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
- s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
- &s3fwrn5_nci_ops.n_prop_ops);
-
info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
S3FWRN5_NFC_PROTOCOLS, 0, 0);
if (!info->ndev)
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index e3e72b8a29e3..1af7a1e632cf 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -421,10 +421,9 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
- ret = PTR_ERR(tfm);
dev_err(&fw_info->ndev->nfc_dev->dev,
"Cannot allocate shash (code=%pe)\n", tfm);
- goto out;
+ return PTR_ERR(tfm);
}
ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
@@ -433,7 +432,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Cannot compute hash (code=%d)\n", ret);
- goto out;
+ return ret;
}
/* Firmware update process */
@@ -446,7 +445,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Unable to enter update mode\n");
- goto out;
+ return ret;
}
for (off = 0; off < image_size; off += fw_info->sector_size) {
@@ -455,7 +454,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Firmware update error (code=%d)\n", ret);
- goto out;
+ return ret;
}
}
@@ -463,13 +462,12 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Unable to complete update mode\n");
- goto out;
+ return ret;
}
dev_info(&fw_info->ndev->nfc_dev->dev,
"Firmware update: success\n");
-out:
return ret;
}
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index f042d3eaf8f6..e374e670b36b 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -20,7 +20,7 @@ static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
return 0;
}
-static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
+const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_SET_RFREG),
@@ -43,12 +43,6 @@ static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
},
};
-void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n)
-{
- *ops = s3fwrn5_nci_prop_ops;
- *n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
-}
-
#define S3FWRN5_RFREG_SECTION_SIZE 252
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index a80f0fb082a8..c2d906591e9e 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -50,7 +50,7 @@ struct nci_prop_fw_cfg_rsp {
__u8 status;
};
-void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n);
+extern const struct nci_driver_ops s3fwrn5_nci_prop_ops[4];
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
#endif /* __LOCAL_S3FWRN5_NCI_H_ */
diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c
index 110ff1281e5f..72bb51efdf9c 100644
--- a/drivers/nfc/st-nci/core.c
+++ b/drivers/nfc/st-nci/core.c
@@ -86,7 +86,7 @@ static int st_nci_prop_rsp_packet(struct nci_dev *ndev,
return 0;
}
-static struct nci_driver_ops st_nci_prop_ops[] = {
+static const struct nci_driver_ops st_nci_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
ST_NCI_CORE_PROP),
@@ -94,7 +94,7 @@ static struct nci_driver_ops st_nci_prop_ops[] = {
},
};
-static struct nci_ops st_nci_ops = {
+static const struct nci_ops st_nci_ops = {
.init = st_nci_init,
.open = st_nci_open,
.close = st_nci_close,
@@ -131,6 +131,7 @@ int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
| NFC_PROTO_ISO15693_MASK
| NFC_PROTO_NFC_DEP_MASK;
+ BUILD_BUG_ON(ARRAY_SIZE(st_nci_prop_ops) > NCI_MAX_PROPRIETARY_CMD);
ndlc->ndev = nci_allocate_device(&st_nci_ops, protocols,
phy_headroom, phy_tailroom);
if (!ndlc->ndev) {
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 46981405e8b1..ccf6152ebb9f 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -186,7 +186,7 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = st_nci_i2c_write,
.enable = st_nci_i2c_enable,
.disable = st_nci_i2c_disable,
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 5d74c674368a..e9dc313b333e 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -253,9 +253,9 @@ static void ndlc_t2_timeout(struct timer_list *t)
schedule_work(&ndlc->sm_work);
}
-int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
- int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
- struct st_nci_se_status *se_status)
+int ndlc_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ struct device *dev, int phy_headroom, int phy_tailroom,
+ struct llt_ndlc **ndlc_id, struct st_nci_se_status *se_status)
{
struct llt_ndlc *ndlc;
diff --git a/drivers/nfc/st-nci/ndlc.h b/drivers/nfc/st-nci/ndlc.h
index 066e2fd75238..c24ce9b0df52 100644
--- a/drivers/nfc/st-nci/ndlc.h
+++ b/drivers/nfc/st-nci/ndlc.h
@@ -16,7 +16,7 @@ struct st_nci_se_status;
/* Low Level Transport description */
struct llt_ndlc {
struct nci_dev *ndev;
- struct nfc_phy_ops *ops;
+ const struct nfc_phy_ops *ops;
void *phy_id;
struct timer_list t1_timer;
@@ -45,8 +45,8 @@ int ndlc_open(struct llt_ndlc *ndlc);
void ndlc_close(struct llt_ndlc *ndlc);
int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb);
void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb);
-int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
- int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
- struct st_nci_se_status *se_status);
+int ndlc_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ struct device *dev, int phy_headroom, int phy_tailroom,
+ struct llt_ndlc **ndlc_id, struct st_nci_se_status *se_status);
void ndlc_remove(struct llt_ndlc *ndlc);
#endif /* __LOCAL_NDLC_H__ */
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 250d56f204c3..a620c34790e6 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -198,7 +198,7 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops spi_phy_ops = {
+static const struct nfc_phy_ops spi_phy_ops = {
.write = st_nci_spi_write,
.enable = st_nci_spi_enable,
.disable = st_nci_spi_disable,
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index 94b600029a2a..30d2912d1a05 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -371,7 +371,7 @@ static int st_nci_manufacturer_specific(struct nfc_dev *dev, void *data,
return nfc_vendor_cmd_reply(msg);
}
-static struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
+static const struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
{
.vendor_id = ST_NCI_VENDOR_OUI,
.subcmd = FACTORY_MODE,
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index 6ca0d2f56b18..5e6c99fcfd27 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -72,7 +72,7 @@
static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
-static struct nfc_hci_gate st21nfca_gates[] = {
+static const struct nfc_hci_gate st21nfca_gates[] = {
{NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
{ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
@@ -912,7 +912,7 @@ static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe,
}
}
-static struct nfc_hci_ops st21nfca_hci_ops = {
+static const struct nfc_hci_ops st21nfca_hci_ops = {
.open = st21nfca_hci_open,
.close = st21nfca_hci_close,
.load_session = st21nfca_hci_load_session,
@@ -935,7 +935,7 @@ static struct nfc_hci_ops st21nfca_hci_ops = {
.se_io = st21nfca_hci_se_io,
};
-int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
int phy_payload, struct nfc_hci_dev **hdev,
struct st21nfca_se_status *se_status)
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 7a9f4d71707e..1b44a37a71aa 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -76,8 +76,8 @@ struct st21nfca_i2c_phy {
struct mutex phy_lock;
};
-static u8 len_seq[] = { 16, 24, 12, 29 };
-static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
+static const u8 len_seq[] = { 16, 24, 12, 29 };
+static const u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
#define I2C_DUMP_SKB(info, skb) \
do { \
@@ -482,7 +482,7 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = st21nfca_hci_i2c_write,
.enable = st21nfca_hci_i2c_enable,
.disable = st21nfca_hci_i2c_disable,
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 5e0de0fef1d4..cb6ad916be91 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -144,7 +144,7 @@ struct st21nfca_se_info {
};
struct st21nfca_hci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -163,7 +163,7 @@ struct st21nfca_hci_info {
struct st21nfca_vendor_info vendor_info;
};
-int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
int phy_payload, struct nfc_hci_dev **hdev,
struct st21nfca_se_status *se_status);
diff --git a/drivers/nfc/st21nfca/vendor_cmds.c b/drivers/nfc/st21nfca/vendor_cmds.c
index 62332ca91554..74882866dbaf 100644
--- a/drivers/nfc/st21nfca/vendor_cmds.c
+++ b/drivers/nfc/st21nfca/vendor_cmds.c
@@ -295,7 +295,7 @@ exit:
return r;
}
-static struct nfc_vendor_cmd st21nfca_vendor_cmds[] = {
+static const struct nfc_vendor_cmd st21nfca_vendor_cmds[] = {
{
.vendor_id = ST21NFCA_VENDOR_OUI,
.subcmd = FACTORY_MODE,
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 2dc788c363fd..993818742570 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1037,7 +1037,7 @@ static void st95hf_abort_cmd(struct nfc_digital_dev *ddev)
{
}
-static struct nfc_digital_ops st95hf_nfc_digital_ops = {
+static const struct nfc_digital_ops st95hf_nfc_digital_ops = {
.in_configure_hw = st95hf_in_configure_hw,
.in_send_cmd = st95hf_in_send_cmd,
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 33978022ae47..8890fcd59c39 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -643,7 +643,7 @@ static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
}
static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
- unsigned int len, u8 *prefix,
+ unsigned int len, const u8 *prefix,
unsigned int prefix_len)
{
struct spi_transfer t[2];
@@ -1387,9 +1387,10 @@ static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
}
}
-static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
+static int trf7970a_per_cmd_config(struct trf7970a *trf,
+ const struct sk_buff *skb)
{
- u8 *req = skb->data;
+ const u8 *req = skb->data;
u8 special_fcn_reg1, iso_ctrl;
int ret;
@@ -1791,7 +1792,7 @@ out_err:
static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Listen - state: %d, timeout: %d ms\n",
trf->state, timeout);
@@ -1803,7 +1804,7 @@ static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev,
u16 timeout, nfc_digital_cmd_complete_t cb,
void *arg)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
dev_dbg(trf->dev, "Listen MD - state: %d, timeout: %d ms\n",
@@ -1824,7 +1825,7 @@ static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev,
static int trf7970a_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Get RF Tech - state: %d, rf_tech: %d\n",
trf->state, trf->md_rf_tech);
@@ -1861,7 +1862,7 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
mutex_unlock(&trf->lock);
}
-static struct nfc_digital_ops trf7970a_nfc_ops = {
+static const struct nfc_digital_ops trf7970a_nfc_ops = {
.in_configure_hw = trf7970a_in_configure_hw,
.in_send_cmd = trf7970a_send_cmd,
.tg_configure_hw = trf7970a_tg_configure_hw,
@@ -1974,7 +1975,7 @@ static void trf7970a_shutdown(struct trf7970a *trf)
trf7970a_power_down(trf);
}
-static int trf7970a_get_autosuspend_delay(struct device_node *np)
+static int trf7970a_get_autosuspend_delay(const struct device_node *np)
{
int autosuspend_delay, ret;
@@ -1987,7 +1988,7 @@ static int trf7970a_get_autosuspend_delay(struct device_node *np)
static int trf7970a_probe(struct spi_device *spi)
{
- struct device_node *np = spi->dev.of_node;
+ const struct device_node *np = spi->dev.of_node;
struct trf7970a *trf;
int uvolts, autosuspend_delay, ret;
u32 clk_freq = TRF7970A_13MHZ_CLOCK_FREQUENCY;
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index f73ee0bf3593..2ee0ec4bb739 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -65,7 +65,7 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return 0;
}
-static struct nci_ops virtual_nci_ops = {
+static const struct nci_ops virtual_nci_ops = {
.open = virtual_nci_open,
.close = virtual_nci_close,
.send = virtual_nci_send
@@ -170,7 +170,7 @@ static int virtual_ncidev_close(struct inode *inode, struct file *file)
static long virtual_ncidev_ioctl(struct file *flip, unsigned int cmd,
unsigned long arg)
{
- struct nfc_dev *nfc_dev = ndev->nfc_dev;
+ const struct nfc_dev *nfc_dev = ndev->nfc_dev;
void __user *p = (void __user *)arg;
if (cmd != IOCTL_GET_NCIDEV_IDX)
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 9748165e08e9..acbe76a76fb2 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -504,28 +504,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
- * get_ccwgroupdev_by_busid() - obtain device from a bus id
- * @gdrv: driver the device is owned by
- * @bus_id: bus id of the device to be searched
- *
- * This function searches all devices owned by @gdrv for a device with a bus
- * id matching @bus_id.
- * Returns:
- * If a match is found, its reference count of the found device is increased
- * and it is returned; else %NULL is returned.
- */
-struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
- char *bus_id)
-{
- struct device *dev;
-
- dev = driver_find_device_by_name(&gdrv->driver, bus_id);
-
- return dev ? to_ccwgroupdev(dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
-
-/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index bf236d474538..cff91b4f1a76 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -88,15 +88,6 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
-config QETH_OSN
- def_bool !HAVE_MARCH_Z14_FEATURES
- prompt "qeth OSN device support"
- depends on QETH
- help
- This enables the qeth driver to support devices in OSN mode.
- This feature will be removed in 2021.
- If unsure, choose N.
-
config QETH_OSX
def_bool !HAVE_MARCH_Z15_FEATURES
prompt "qeth OSX device support"
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f4d554ea0c93..535a60b3946d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -259,22 +259,10 @@ struct qeth_hdr_layer2 {
__u8 reserved2[16];
} __attribute__ ((packed));
-struct qeth_hdr_osn {
- __u8 id;
- __u8 reserved;
- __u16 seq_no;
- __u16 reserved2;
- __u16 control_flags;
- __u16 pdu_length;
- __u8 reserved3[18];
- __u32 ccid;
-} __attribute__ ((packed));
-
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
- struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
@@ -341,7 +329,6 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_L3_TSO = 0x03,
- QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
QETH_HEADER_MASK_INVAL = 0x80,
};
@@ -779,18 +766,13 @@ enum qeth_threads {
QETH_RECOVER_THREAD = 1,
};
-struct qeth_osn_info {
- int (*assist_cb)(struct net_device *dev, void *data);
- int (*data_cb)(struct sk_buff *skb);
-};
-
struct qeth_discipline {
- const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
- int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
+ int (*do_ioctl)(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
};
@@ -865,7 +847,6 @@ struct qeth_card {
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
int read_or_write_problem;
- struct qeth_osn_info osn_info;
const struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
@@ -1058,10 +1039,7 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern const struct qeth_discipline qeth_l2_discipline;
extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
-extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_dev_groups[];
-extern const struct attribute_group *qeth_osn_dev_groups[];
-extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
@@ -1069,11 +1047,9 @@ int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
-extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
-struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
@@ -1088,9 +1064,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
unsigned int data_length);
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code,
@@ -1099,18 +1072,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
enum qeth_diags_cmds sub_cmd,
unsigned int data_length);
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
-void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
@@ -1118,12 +1085,9 @@ int qeth_query_card_info(struct qeth_card *card,
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode);
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
@@ -1148,11 +1112,4 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len));
-/* exports for OSN */
-int qeth_osn_assist(struct net_device *, void *, int);
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *));
-void qeth_osn_deregister(struct net_device *);
-
#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 62f88ccbd03f..5b973f377504 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -57,8 +57,7 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
};
EXPORT_SYMBOL_GPL(qeth_dbf);
-struct kmem_cache *qeth_core_header_cache;
-EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
@@ -101,8 +100,6 @@ static const char *qeth_get_cardname(struct qeth_card *card)
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return " OSN QDIO";
case QETH_CARD_TYPE_OSM:
return " OSM QDIO";
case QETH_CARD_TYPE_OSX:
@@ -157,8 +154,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
}
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return "OSN";
case QETH_CARD_TYPE_OSM:
return "OSM_1000";
case QETH_CARD_TYPE_OSX:
@@ -431,6 +426,13 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
+static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
+{
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ kfree(iob->data);
+ kfree(iob);
+ }
+}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -499,12 +501,11 @@ static void qeth_dequeue_cmd(struct qeth_card *card,
spin_unlock_irq(&card->lock);
}
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
+static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
iob->rc = reason;
complete(&iob->done);
}
-EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
@@ -781,10 +782,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command != IPA_CMD_SETCCID &&
- cmd->hdr.command != IPA_CMD_DELCCID &&
- cmd->hdr.command != IPA_CMD_MODCCID &&
- cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
return cmd;
}
@@ -819,8 +817,6 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (card->discipline->control_event_handler(card, cmd))
return cmd;
return NULL;
- case IPA_CMD_MODCCID:
- return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
@@ -877,15 +873,6 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-void qeth_put_cmd(struct qeth_cmd_buffer *iob)
-{
- if (refcount_dec_and_test(&iob->ref_count)) {
- kfree(iob->data);
- kfree(iob);
- }
-}
-EXPORT_SYMBOL_GPL(qeth_put_cmd);
-
static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
@@ -899,9 +886,9 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
qeth_put_cmd(iob);
}
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout)
+static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length,
+ unsigned int ccws, long timeout)
{
struct qeth_cmd_buffer *iob;
@@ -927,7 +914,6 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
iob->length = length;
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -958,11 +944,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
- if (IS_OSN(card) && card->osn_info.assist_cb &&
- cmd->hdr.command != IPA_CMD_STARTLAN) {
- card->osn_info.assist_cb(card->dev, cmd);
- goto out;
- }
}
/* match against pending cmd requests */
@@ -1835,7 +1816,7 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (IS_OSM(card) || IS_OSN(card))
+ if (IS_OSM(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
@@ -1885,7 +1866,6 @@ static void qeth_idx_init(struct qeth_card *card)
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
break;
case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -2442,9 +2422,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
- if (IS_OSN(card))
- return QETH_PROT_OSN2;
- return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+ return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}
static int qeth_ulp_enable(struct qeth_card *card)
@@ -3000,10 +2978,8 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply))
+static void qeth_prepare_ipa_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 cmd_length)
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -3011,7 +2987,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
- iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -3022,7 +2997,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
@@ -3046,7 +3020,8 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
+ qeth_prepare_ipa_cmd(card, iob, data_length);
+ iob->match = qeth_ipa_match_reply;
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -3894,7 +3869,8 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
* Returns the number of pages, and thus QDIO buffer elements, needed to map the
* skb's data (both its linear part and paged fragments).
*/
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
+static unsigned int qeth_count_elements(struct sk_buff *skb,
+ unsigned int data_offset)
{
unsigned int elements = qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
@@ -3904,7 +3880,6 @@ unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
elements += qeth_get_elements_for_range(start, end);
return elements;
}
-EXPORT_SYMBOL_GPL(qeth_count_elements);
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
MAX_TCP_HEADER)
@@ -4192,10 +4167,11 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
return 0;
}
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed)
+static int qeth_do_send_packet(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len,
+ unsigned int elements_needed)
{
unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
@@ -4275,7 +4251,6 @@ out:
netif_tx_start_queue(txq);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
unsigned int payload_len, struct sk_buff *skb,
@@ -4554,7 +4529,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
- (card->info.link_type != QETH_LINK_TYPE_OSN) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
@@ -5266,10 +5240,6 @@ static struct ccw_device_id qeth_ids[] = {
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
-#ifdef CONFIG_QETH_OSN
- {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
- .driver_info = QETH_CARD_TYPE_OSN},
-#endif
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
@@ -5628,14 +5598,6 @@ static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
bool is_cso;
switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_OSN:
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
- QETH_CARD_STAT_INC(card, rx_packets);
-
- card->osn_info.data_cb(skb);
- return;
#if IS_ENABLED(CONFIG_QETH_L3)
case QETH_HEADER_TYPE_LAYER3:
qeth_l3_rebuild_skb(card, skb, hdr);
@@ -5750,16 +5712,6 @@ next_packet:
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
- case QETH_HEADER_TYPE_OSN:
- skb_len = hdr->hdr.osn.pdu_length;
- if (!IS_OSN(card)) {
- QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
- goto walk_packet;
- }
-
- linear_len = skb_len;
- headroom = sizeof(struct qeth_hdr);
- break;
default:
if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
@@ -5777,8 +5729,7 @@ next_packet:
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
(skb_len > READ_ONCE(priv->rx_copybreak) &&
- !atomic_read(&card->force_alloc_skb) &&
- !IS_OSN(card));
+ !atomic_read(&card->force_alloc_skb));
if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
@@ -6335,14 +6286,9 @@ void qeth_remove_discipline(struct qeth_card *card)
card->discipline = NULL;
}
-const struct device_type qeth_generic_devtype = {
+static const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
};
-EXPORT_SYMBOL_GPL(qeth_generic_devtype);
-
-static const struct device_type qeth_osn_devtype = {
- .name = "qeth_osn",
-};
#define DBF_NAME_LEN 20
@@ -6425,10 +6371,6 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
case QETH_CARD_TYPE_OSM:
dev = alloc_etherdev(sizeof(*priv));
break;
- case QETH_CARD_TYPE_OSN:
- dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
default:
dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
@@ -6442,23 +6384,19 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
- dev->min_mtu = IS_OSN(card) ? 64 : 576;
+ dev->min_mtu = 576;
/* initialized when device first goes online: */
dev->max_mtu = 0;
dev->mtu = 0;
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (IS_OSN(card)) {
- dev->ethtool_ops = &qeth_osn_ethtool_ops;
- } else {
- dev->ethtool_ops = &qeth_ethtool_ops;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
- dev->features |= NETIF_F_SG;
- }
+ dev->ethtool_ops = &qeth_ethtool_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
return dev;
}
@@ -6521,10 +6459,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_chp_desc;
- if (IS_OSN(card))
- gdev->dev.groups = qeth_osn_dev_groups;
- else
- gdev->dev.groups = qeth_dev_groups;
+ gdev->dev.groups = qeth_dev_groups;
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -6538,8 +6473,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_setup_disc;
- gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
- card->discipline->devtype;
break;
}
@@ -6657,36 +6590,42 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.shutdown = qeth_core_shutdown,
};
-struct qeth_card *qeth_get_card_by_busid(char *bus_id)
-{
- struct ccwgroup_device *gdev;
- struct qeth_card *card;
-
- gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
- if (!gdev)
- return NULL;
-
- card = dev_get_drvdata(&gdev->dev);
- put_device(&gdev->dev);
- return card;
-}
-EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
-
-int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
- struct mii_ioctl_data *mii_data;
int rc = 0;
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
- rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_snmp_command(card, data);
break;
case SIOC_QETH_GET_CARD_TYPE:
if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
!IS_VM_NIC(card))
return 1;
return 0;
+ case SIOC_QETH_QUERY_OAT:
+ rc = qeth_query_oat_command(card, data);
+ break;
+ default:
+ if (card->discipline->do_ioctl)
+ rc = card->discipline->do_ioctl(dev, rq, data, cmd);
+ else
+ rc = -EOPNOTSUPP;
+ }
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
+
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct mii_ioctl_data *mii_data;
+ int rc = 0;
+
+ switch (cmd) {
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -6699,14 +6638,8 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mii_data->val_out = qeth_mdio_read(dev,
mii_data->phy_id, mii_data->reg_num);
break;
- case SIOC_QETH_QUERY_OAT:
- rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
- break;
default:
- if (card->discipline->do_ioctl)
- rc = card->discipline->do_ioctl(dev, rq, cmd);
- else
- rc = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 68c2588b9dcc..d9266f7d8187 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -232,9 +232,6 @@ static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_VNICC, "vnic_characteristics"},
{IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
- {IPA_CMD_SETCCID, "setccid"},
- {IPA_CMD_DELCCID, "delccid"},
- {IPA_CMD_MODCCID, "modccid"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e4bde7daf083..6257f00786b3 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -34,8 +34,6 @@ extern const unsigned char IPA_PDU_HEADER[];
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
#define IPA_CMD_INITIATOR_OSA 0x01
-#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
-#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
struct qeth_ipa_caps {
@@ -66,7 +64,6 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
- QETH_CARD_TYPE_OSN = 6,
QETH_CARD_TYPE_OSM = 3,
QETH_CARD_TYPE_OSX = 2,
};
@@ -75,12 +72,6 @@ enum qeth_card_types {
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
-#ifdef CONFIG_QETH_OSN
-#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
-#else
-#define IS_OSN(card) false
-#endif
-
#ifdef CONFIG_QETH_OSX
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
#else
@@ -95,7 +86,6 @@ enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
- QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
@@ -126,9 +116,6 @@ enum qeth_ipa_cmds {
IPA_CMD_DELVLAN = 0x26,
IPA_CMD_VNICC = 0x2a,
IPA_CMD_SETBRIDGEPORT_OSA = 0x2b,
- IPA_CMD_SETCCID = 0x41,
- IPA_CMD_DELCCID = 0x42,
- IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_QIPASSIST = 0xb2,
IPA_CMD_SETASSPARMS = 0xb3,
@@ -879,8 +866,7 @@ extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* Helper functions */
-#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
- (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+#define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST)
/*****************************************************************************/
/* END OF IP Assist related definitions */
@@ -919,10 +905,9 @@ extern const unsigned char ULP_ENABLE[];
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
-/* Layer 2 definitions */
-#define QETH_PROT_LAYER2 0x08
-#define QETH_PROT_TCPIP 0x03
-#define QETH_PROT_OSN2 0x0a
+
+#define QETH_MPC_PROT_L2 0x08
+#define QETH_MPC_PROT_L3 0x03
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 5815114da468..406be169173c 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -671,11 +671,6 @@ static const struct attribute_group qeth_dev_group = {
.attrs = qeth_dev_attrs,
};
-const struct attribute_group *qeth_osn_dev_groups[] = {
- &qeth_dev_group,
- NULL,
-};
-
const struct attribute_group *qeth_dev_groups[] = {
&qeth_dev_group,
&qeth_dev_extended_group,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 2c4cb300a8fc..3937986f159a 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -469,10 +469,3 @@ const struct ethtool_ops qeth_ethtool_ops = {
.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
-
-const struct ethtool_ops qeth_osn_ethtool_ops = {
- .get_strings = qeth_get_strings,
- .get_ethtool_stats = qeth_get_ethtool_stats,
- .get_sset_count = qeth_get_sset_count,
- .get_drvinfo = qeth_get_drvinfo,
-};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2abf86c104d5..69afc0311dd1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -309,17 +309,16 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (!IS_OSN(card)) {
- rc = qeth_setadpparms_change_macaddr(card);
- if (!rc)
- goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
- CARD_DEVID(card), rc);
- QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* fall back once more: */
- }
+ rc = qeth_setadpparms_change_macaddr(card);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* some devices don't support a custom MAC address: */
+ /* Fall back once more, but some devices don't support a custom MAC
+ * address:
+ */
if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
@@ -334,7 +333,7 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
if (!is_valid_ether_addr(card->dev->dev_addr))
qeth_l2_request_initial_mac(card);
- if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
card->info.dev_addr_is_registered = 1;
else
card->info.dev_addr_is_registered = 0;
@@ -496,44 +495,6 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
qeth_l2_set_promisc_mode(card);
}
-static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue)
-{
- gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
- struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
- addr_t end = (addr_t)(skb->data + sizeof(*hdr));
- addr_t start = (addr_t)skb->data;
- unsigned int elements = 0;
- unsigned int hd_len = 0;
- int rc;
-
- if (skb->protocol == htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
-
- if (qeth_get_elements_for_range(start, end) > 1) {
- /* Misaligned HW header, move it to its own buffer element. */
- hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
- if (!hdr)
- return -ENOMEM;
- hd_len = sizeof(*hdr);
- skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
- elements++;
- }
-
- elements += qeth_count_elements(skb, hd_len);
- if (elements > queue->max_elements) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
- elements);
-out:
- if (rc && hd_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- return rc;
-}
-
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -548,12 +509,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
txq = qeth_iqd_translate_txq(dev, txq);
queue = card->qdio.out_qs[txq];
- if (IS_OSN(card))
- rc = qeth_l2_xmit_osn(card, skb, queue);
- else
- rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
- qeth_l2_fill_header);
-
+ rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
+ qeth_l2_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -879,7 +836,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
@@ -890,23 +848,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
-static const struct net_device_ops qeth_osn_netdev_ops = {
- .ndo_open = qeth_open,
- .ndo_stop = qeth_stop,
- .ndo_get_stats64 = qeth_get_stats64,
- .ndo_start_xmit = qeth_l2_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qeth_tx_timeout,
-};
-
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- if (IS_OSN(card)) {
- card->dev->netdev_ops = &qeth_osn_netdev_ops;
- card->dev->flags |= IFF_NOARP;
- goto add_napi;
- }
-
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -952,7 +895,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
-add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
return register_netdev(card->dev);
}
@@ -1044,84 +986,6 @@ static void qeth_l2_enable_brport_features(struct qeth_card *card)
}
}
-#ifdef CONFIG_QETH_OSN
-static void qeth_osn_assist_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int data_length)
-{
- qeth_notify_cmd(iob, 0);
- qeth_put_cmd(iob);
-}
-
-int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
-{
- struct qeth_cmd_buffer *iob;
- struct qeth_card *card;
-
- if (data_len < 0)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- card = dev->ml_priv;
- if (!card)
- return -ENODEV;
- QETH_CARD_TEXT(card, 2, "osnsdmc");
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
- iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1,
- QETH_IPA_TIMEOUT);
- if (!iob)
- return -ENOMEM;
-
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
-
- memcpy(__ipa_cmd(iob), data, data_len);
- iob->callback = qeth_osn_assist_cb;
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-EXPORT_SYMBOL(qeth_osn_assist);
-
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *))
-{
- struct qeth_card *card;
- char bus_id[16];
- u16 devno;
-
- memcpy(&devno, read_dev_no, 2);
- sprintf(bus_id, "0.0.%04x", devno);
- card = qeth_get_card_by_busid(bus_id);
- if (!card || !IS_OSN(card))
- return -ENODEV;
- *dev = card->dev;
-
- QETH_CARD_TEXT(card, 2, "osnreg");
- if ((assist_cb == NULL) || (data_cb == NULL))
- return -EINVAL;
- card->osn_info.assist_cb = assist_cb;
- card->osn_info.data_cb = data_cb;
- return 0;
-}
-EXPORT_SYMBOL(qeth_osn_register);
-
-void qeth_osn_deregister(struct net_device *dev)
-{
- struct qeth_card *card;
-
- if (!dev)
- return;
- card = dev->ml_priv;
- if (!card)
- return;
- QETH_CARD_TEXT(card, 2, "osndereg");
- card->osn_info.assist_cb = NULL;
- card->osn_info.data_cb = NULL;
-}
-EXPORT_SYMBOL(qeth_osn_deregister);
-#endif
-
/* SETBRIDGEPORT support, async notifications */
enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
@@ -2190,16 +2054,15 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
if (rc)
return rc;
+ } else {
+ gdev->dev.type = &qeth_l2_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
@@ -2210,8 +2073,9 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (gdev->dev.type == &qeth_generic_devtype)
+ if (gdev->dev.type != &qeth_l2_devtype)
device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
+
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -2331,7 +2195,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f0d6f205c53c..3a523e700a5a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1512,7 +1512,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
return rc;
}
-static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
@@ -1532,13 +1532,13 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EPERM;
break;
}
- rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_l3_arp_query(card, data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
case SIOC_QETH_ARP_REMOVE_ENTRY:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
return -EFAULT;
arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
@@ -1841,7 +1841,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1856,7 +1857,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1940,12 +1942,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
if (!card->cmd_wq)
return -ENOMEM;
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
if (rc) {
destroy_workqueue(card->cmd_wq);
return rc;
}
+ } else {
+ gdev->dev.type = &qeth_l3_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
@@ -1956,7 +1960,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- if (cgdev->dev.type == &qeth_generic_devtype)
+ if (cgdev->dev.type != &qeth_l3_devtype)
device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);
qeth_set_allowed_threads(card, 0, 1);
@@ -2065,7 +2069,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l3_discipline = {
- .devtype = &qeth_l3_devtype,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index dcbba9621b21..5d24c1b6663b 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -524,7 +524,7 @@ static const struct net_device_ops cvm_oct_npi_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -540,7 +540,7 @@ static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -556,7 +556,7 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -570,7 +570,7 @@ static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -586,7 +586,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -599,7 +599,7 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit_pow,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 5012b9176526..34decb03e92f 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -22,6 +22,8 @@ void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int rtw_android_priv_cmd(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
struct net_device *rtw_init_netdev(void);
u16 rtw_recv_select_queue(struct sk_buff *skb);
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index 2c26993b8205..3018fc1e8de8 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -45,6 +45,7 @@ enum ANDROID_WIFI_CMD {
ANDROID_WIFI_CMD_MAX
};
-int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr,
+ void __user *data, int cmd);
#endif /* __RTW_ANDROID_H__ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index b958a8d882b0..193a3dde462c 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2769,9 +2769,6 @@ int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
break;
#endif /* CONFIG_88EU_AP_MODE */
- case (SIOCDEVPRIVATE + 1):
- ret = rtw_android_priv_cmd(dev, rq, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 423c382e3d20..596e03e7b286 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -288,6 +288,7 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
.ndo_do_ioctl = rtw_ioctl,
+ .ndo_siocdevprivate = rtw_android_priv_cmd,
};
static const struct device_type wlan_type = {
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 3c5446999686..a13df3880378 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -5,6 +5,7 @@
*
******************************************************************************/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -116,7 +117,8 @@ static int android_get_p2p_addr(struct net_device *net, char *command,
return ETH_ALEN;
}
-int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr,
+ void __user *data, int cmd)
{
int ret = 0;
char *command;
@@ -124,9 +126,15 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- if (!ifr->ifr_data)
+ if (cmd != SIOCDEVPRIVATE)
+ return -EOPNOTSUPP;
+
+ if (in_compat_syscall()) /* to be implemented */
+ return -EOPNOTSUPP;
+
+ if (!data)
return -EINVAL;
- if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(priv_cmd)))
+ if (copy_from_user(&priv_cmd, data, sizeof(priv_cmd)))
return -EFAULT;
if (priv_cmd.total_len < 1)
return -EINVAL;
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 111e0179712a..5badd441c14b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -48,6 +48,8 @@ void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int rtw_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index f95000df8942..aa7bd76bb5f1 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -4485,6 +4485,21 @@ exit:
return err;
}
+int rtw_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)rq;
+
+ /* little hope of fixing this, better remove the whole function */
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -EOPNOTSUPP;
+
+ return rtw_ioctl_wext_private(dev, &wrq->u);
+}
+
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
@@ -4497,9 +4512,6 @@ int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case RTL_IOCTL_HOSTAPD:
ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
break;
- case SIOCDEVPRIVATE:
- ret = rtw_ioctl_wext_private(dev, &wrq->u);
- break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 648456b992bb..9e38b53d3b4a 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -459,6 +459,7 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
.ndo_do_ioctl = rtw_ioctl,
+ .ndo_siocdevprivate = rtw_siocdevprivate,
};
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 6f470e7ba647..1c62130a5eee 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -98,8 +98,8 @@ static int p80211knetdev_stop(struct net_device *netdev);
static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev);
static void p80211knetdev_set_multicast_list(struct net_device *dev);
-static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd);
+static int p80211knetdev_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
@@ -461,56 +461,8 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
wlandev->set_multicast_list(wlandev, dev);
}
-#ifdef SIOCETHTOOL
-
-static int p80211netdev_ethtool(struct wlandevice *wlandev,
- void __user *useraddr)
-{
- u32 ethcmd;
- struct ethtool_drvinfo info;
- struct ethtool_value edata;
-
- memset(&info, 0, sizeof(info));
- memset(&edata, 0, sizeof(edata));
-
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
-
- switch (ethcmd) {
- case ETHTOOL_GDRVINFO:
- info.cmd = ethcmd;
- snprintf(info.driver, sizeof(info.driver), "p80211_%s",
- wlandev->nsdname);
- snprintf(info.version, sizeof(info.version), "%s",
- WLAN_RELEASE);
-
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-#ifdef ETHTOOL_GLINK
- case ETHTOOL_GLINK:
- edata.cmd = ethcmd;
-
- if (wlandev->linkstatus &&
- (wlandev->macmode != WLAN_MACMODE_NONE)) {
- edata.data = 1;
- } else {
- edata.data = 0;
- }
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-#endif
- }
-
- return -EOPNOTSUPP;
-}
-
-#endif
-
/*----------------------------------------------------------------
- * p80211knetdev_do_ioctl
+ * p80211knetdev_siocdevprivate
*
* Handle an ioctl call on one of our devices. Everything Linux
* ioctl specific is done here. Then we pass the contents of the
@@ -537,8 +489,9 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev,
* locks.
*----------------------------------------------------------------
*/
-static int p80211knetdev_do_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int p80211knetdev_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd)
{
int result = 0;
struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
@@ -547,13 +500,8 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
netdev_dbg(dev, "rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);
-#ifdef SIOCETHTOOL
- if (cmd == SIOCETHTOOL) {
- result =
- p80211netdev_ethtool(wlandev, (void __user *)ifr->ifr_data);
- goto bail;
- }
-#endif
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
/* Test the magic, assume ifr is good if it's there */
if (req->magic != P80211_IOCTL_MAGIC) {
@@ -569,7 +517,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
goto bail;
}
- msgbuf = memdup_user(req->data, req->len);
+ msgbuf = memdup_user(data, req->len);
if (IS_ERR(msgbuf)) {
result = PTR_ERR(msgbuf);
goto bail;
@@ -578,10 +526,8 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
result = p80211req_dorequest(wlandev, msgbuf);
if (result == 0) {
- if (copy_to_user
- (req->data, msgbuf, req->len)) {
+ if (copy_to_user(data, msgbuf, req->len))
result = -EFAULT;
- }
}
kfree(msgbuf);
@@ -682,7 +628,7 @@ static const struct net_device_ops p80211_netdev_ops = {
.ndo_stop = p80211knetdev_stop,
.ndo_start_xmit = p80211knetdev_hard_start_xmit,
.ndo_set_rx_mode = p80211knetdev_set_multicast_list,
- .ndo_do_ioctl = p80211knetdev_do_ioctl,
+ .ndo_siocdevprivate = p80211knetdev_siocdevprivate,
.ndo_set_mac_address = p80211knetdev_set_mac_address,
.ndo_tx_timeout = p80211knetdev_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 5bb928b7873e..3e3b8873fa29 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1524,11 +1524,11 @@ static int hdlcdev_close(struct net_device *dev)
*
* Return: 0 if success, otherwise error code
*/
-static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdev_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct slgt_info *info = dev_to_port(dev);
unsigned int flags;
@@ -1538,17 +1538,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (info->port.count)
return -EBUSY;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
memset(&new_line, 0, sizeof(new_line));
- switch(ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
@@ -1615,7 +1612,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1688,7 +1685,7 @@ static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_siocwandev = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
};